diff --git a/block/blk-mq.c b/block/blk-mq.c
index ed3ed86f7dd2..46d8a994e455 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -605,6 +605,11 @@ static void __blk_mq_free_request(struct request *rq)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	const int sched_tag = rq->internal_tag;
 
+	if (rq->q->disk && rq->q->disk->first_minor == 16) {
+		global_sync_cond2 = 0;
+		smp_wmb();
+	}
+
 	blk_crypto_free_request(rq);
 	blk_pm_mark_last_busy(rq);
 	rq->mq_hctx = NULL;
@@ -2714,6 +2719,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	rq->rq_next = NULL;
 	rq_list_add(&plug->mq_list, rq);
 	plug->rq_count++;
+	if (rq->q->disk && rq->q->disk->first_minor == 16) {
+		global_sync_cond2 = 1;
+		smp_wmb();
+	}
 }
 
 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
@@ -2807,6 +2816,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
  * It will not queue the request if there is an error with the bio, or at the
  * request creation.
  */
+int global_sync_cond2 = 0;
 void blk_mq_submit_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8d18cc7e510e..97793d24c8dc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1973,9 +1973,9 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 		tag_set->ops = &scsi_mq_ops;
 	else
 		tag_set->ops = &scsi_mq_ops_no_commit;
-	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+	tag_set->nr_hw_queues = 1;
 	tag_set->nr_maps = shost->nr_maps ? : 1;
-	tag_set->queue_depth = shost->can_queue;
+	tag_set->queue_depth = 1;
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = NUMA_NO_NODE;
 	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 0e6110da69e7..04a1f2c2abc9 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -875,10 +875,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	if (err)
 		goto virtscsi_init_failed;
 
-	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
-
-	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
-	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
+	shost->can_queue = 1;
+	shost->cmd_per_lun = 1;
 	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
 
 	/* LUNs > 256 are reported with format 1, so they go in the range
diff --git a/fs/buffer.c b/fs/buffer.c
index 2b5561ae5d0b..b37b85f4cf2b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1706,6 +1706,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
+#include <linux/delay.h>
 int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
 			bh_end_io_t *handler)
@@ -1781,9 +1782,14 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 		 * and kswapd activity, but those code paths have their own
 		 * higher-level throttling.
		 */
+		smp_rmb();
+		if (global_sync_cond && global_sync_cond2 && global_sync_cond3) {
+			mdelay(20);
+		}
 		if (wbc->sync_mode != WB_SYNC_NONE) {
 			lock_buffer(bh);
 		} else if (!trylock_buffer(bh)) {
+			pr_info("%s cannot lock for inode %px bh %px\n", current->comm, inode, bh);
 			redirty_page_for_writepage(wbc, page);
 			continue;
 		}
@@ -3110,13 +3116,19 @@ EXPORT_SYMBOL(ll_rw_block);
 
 void write_dirty_buffer(struct buffer_head *bh, int op_flags)
 {
 	lock_buffer(bh);
+	if (!strcmp(bh->b_page->mapping->host->i_sb->s_type->name, "bdev") && MINOR(I_BDEV(bh->b_page->mapping->host)->bd_dev) == 16 && bh->b_assoc_map && !strcmp(bh->b_assoc_map->host->i_sb->s_type->name, "vfat"))
+		pr_err("%s:%s lock for inode %px bh %px\n", current->comm, __func__, bh->b_page->mapping->host, bh);
 	if (!test_clear_buffer_dirty(bh)) {
+		if (!strcmp(bh->b_page->mapping->host->i_sb->s_type->name, "bdev") && MINOR(I_BDEV(bh->b_page->mapping->host)->bd_dev) == 16 && bh->b_assoc_map && !strcmp(bh->b_assoc_map->host->i_sb->s_type->name, "vfat"))
+			pr_err("%s:%s unlock for inode %px bh %px\n", current->comm, __func__, bh->b_page->mapping->host, bh);
 		unlock_buffer(bh);
 		return;
 	}
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
 	submit_bh(REQ_OP_WRITE, op_flags, bh);
+	if (!strcmp(bh->b_page->mapping->host->i_sb->s_type->name, "bdev") && MINOR(I_BDEV(bh->b_page->mapping->host)->bd_dev) == 16 && bh->b_assoc_map && !strcmp(bh->b_assoc_map->host->i_sb->s_type->name, "vfat"))
+		pr_err("%s:%s unlock2 for inode %px bh %px\n", current->comm, __func__, bh->b_page->mapping->host, bh);
 }
 EXPORT_SYMBOL(write_dirty_buffer);
@@ -3131,6 +3143,8 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
 
 	WARN_ON(atomic_read(&bh->b_count) < 1);
 	lock_buffer(bh);
+	if (!strcmp(bh->b_page->mapping->host->i_sb->s_type->name, "bdev") && MINOR(I_BDEV(bh->b_page->mapping->host)->bd_dev) == 16 && bh->b_assoc_map && !strcmp(bh->b_assoc_map->host->i_sb->s_type->name, "vfat"))
+		pr_err("%s:%s lock for inode %px bh %px\n", current->comm, __func__, bh->b_page->mapping->host, bh);
 	if (test_clear_buffer_dirty(bh)) {
 		/*
 		 * The bh should be mapped, but it might not be if the
@@ -3138,6 +3152,8 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
 		 */
 		if (!buffer_mapped(bh)) {
 			unlock_buffer(bh);
+			if (!strcmp(bh->b_page->mapping->host->i_sb->s_type->name, "bdev") && MINOR(I_BDEV(bh->b_page->mapping->host)->bd_dev) == 16 && bh->b_assoc_map && !strcmp(bh->b_assoc_map->host->i_sb->s_type->name, "vfat"))
+				pr_err("%s:%s unlock for inode %px bh %px\n", current->comm, __func__, bh->b_page->mapping->host, bh);
 			return -EIO;
 		}
 
@@ -3150,6 +3166,8 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
 	} else {
 		unlock_buffer(bh);
 	}
+	if (!strcmp(bh->b_page->mapping->host->i_sb->s_type->name, "bdev") && MINOR(I_BDEV(bh->b_page->mapping->host)->bd_dev) == 16 && bh->b_assoc_map && !strcmp(bh->b_assoc_map->host->i_sb->s_type->name, "vfat"))
+		pr_err("%s:%s unlock2 for inode %px bh %px\n", current->comm, __func__, bh->b_page->mapping->host, bh);
 	return ret;
 }
 EXPORT_SYMBOL(__sync_dirty_buffer);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index a5a309fcc7fa..828867d1375d 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -183,6 +183,7 @@ static int fat_file_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+#include <linux/delay.h>
 int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 {
 	struct inode *inode = filp->f_mapping->host;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 591fe9cf1659..97eaf4540f77 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1516,6 +1516,8 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 		 * writeback is not making progress due to locked
 		 * buffers. Skip this inode for now.
 		 */
+//		if (!strcmp("bdev", inode->i_sb->s_type->name))
+//			pr_info("requeue inode %px\n", inode);
 		redirty_tail_locked(inode, wb);
 		return;
 	}
@@ -1866,6 +1868,7 @@ static long writeback_sb_inodes(struct super_block *sb,
 			 * unplug, so get our IOs out the door before we
 			 * give up the CPU.
 			 */
+			pr_warn("resched %s\n", current->comm);
 			blk_flush_plug(current->plug, false);
 			cond_resched();
 		}
@@ -1901,11 +1904,13 @@ static long writeback_sb_inodes(struct super_block *sb,
 	return wrote;
 }
 
+int global_sync_cond = 0;
 static long __writeback_inodes_wb(struct bdi_writeback *wb,
 				  struct wb_writeback_work *work)
 {
 	unsigned long start_time = jiffies;
 	long wrote = 0;
+	int has_vfat = 0;
 
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
@@ -1920,7 +1925,15 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 			redirty_tail(inode, wb);
 			continue;
 		}
+
+		if (!strcmp(sb->s_type->name, "bdev") && MINOR(I_BDEV(inode)->bd_dev) == 16 && has_vfat && wb_inode(wb->b_io.prev) == wb_inode(wb->b_io.next)) {
+			global_sync_cond = 1;
+			smp_wmb();
+		}
+
 		wrote += writeback_sb_inodes(sb, wb, work);
+		if (!strcmp(sb->s_type->name, "vfat"))
+			has_vfat++;
 		up_read(&sb->s_umount);
 
 		/* refer to the same tests at the end of writeback_sb_inodes */
@@ -1997,8 +2010,12 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * after the other works are all done.
 		 */
 		if ((work->for_background || work->for_kupdate) &&
-		    !list_empty(&wb->work_list))
+		    !list_empty(&wb->work_list)) {
+			if (!list_empty(&wb->b_dirty)) { // sync will add new work
+				pr_err("exit %s\n", current->comm);
+			}
 			break;
+		}
 
 		/*
 		 * For background writeout, stop when we are below the
@@ -2058,6 +2075,8 @@ static long wb_writeback(struct bdi_writeback *wb,
 	}
 	spin_unlock(&wb->list_lock);
 	blk_finish_plug(&plug);
+	global_sync_cond = 0;
+	smp_wmb();
 
 	return nr_pages - work->nr_pages;
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bbde95387a23..32c45c8fbe88 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -47,6 +47,10 @@
 #include
 #include
 
+extern int global_sync_cond;	// a bdev inode and a fat inode are in the same writeback work, and the bdev inode is the last one to be written back
+extern int global_sync_cond2;	// the only tag is held by a request that is still sitting on plug->mq_list
+extern int global_sync_cond3;	// wb_writeback is writing the last page of the bdev inode
+
 struct backing_dev_info;
 struct bdi_writeback;
 struct bio;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7e2da284e427..3ed1b439471e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2162,6 +2162,7 @@ EXPORT_SYMBOL(tag_pages_for_writeback);
  *
  * Return: %0 on success, negative error code otherwise
  */
+int global_sync_cond3 = 0;
 int write_cache_pages(struct address_space *mapping,
 		      struct writeback_control *wbc, writepage_t writepage,
 		      void *data)
@@ -2177,6 +2178,8 @@ int write_cache_pages(struct address_space *mapping,
 	int range_whole = 0;
 	xa_mark_t tag;
 
+	global_sync_cond3 = 0;
+	smp_wmb();
 	pagevec_init(&pvec);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* prev offset */
@@ -2239,6 +2242,11 @@ int write_cache_pages(struct address_space *mapping,
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
+			if (i == nr_pages - 1 && !strcmp(mapping->host->i_sb->s_type->name, "bdev") && MINOR(I_BDEV(mapping->host)->bd_dev) == 16) {
+				global_sync_cond3 = 1;
+				smp_wmb();
+			}
+
 			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
 			error = (*writepage)(page, wbc, data);
 			if (unlikely(error)) {
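
The instrumentation above boils down to a published-flag handshake across CPUs: each of the three call sites raises its condition and pushes the store out with smp_wmb(), and the reader in __block_write_full_page re-reads all three behind smp_rmb() before injecting mdelay(20) to hold the buffer lock across the window where the flusher would otherwise make progress. Below is a minimal sketch of that pattern, reduced to one writer and one reader; the helpers publish_cond() and delay_if_race_armed() are illustrative names, not part of the patch.

/* Illustrative sketch only -- not taken from the patch above. */
#include <linux/delay.h>	/* mdelay() */
#include <asm/barrier.h>	/* smp_wmb(), smp_rmb() */

int global_sync_cond, global_sync_cond2, global_sync_cond3;

/* Writer side: each call site raises one of the three conditions. */
static void publish_cond(int *cond)
{
	*cond = 1;
	smp_wmb();		/* publish the flag; pairs with the reader's smp_rmb() */
}

/* Reader side: stall only once all three conditions are visible. */
static void delay_if_race_armed(void)
{
	smp_rmb();		/* pairs with the writers' smp_wmb() */
	if (global_sync_cond && global_sync_cond2 && global_sync_cond3)
		mdelay(20);	/* keep the locked buffer locked across the race window */
}

The SCSI changes arm the same reproducer from the other side: with nr_hw_queues, queue_depth, can_queue and cmd_per_lun all forced to 1, a request parked on plug->mq_list owns the only driver tag, so any other submitter must wait until __blk_mq_free_request() runs -- exactly the interval that global_sync_cond2 brackets.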