From fff42f213824fa434a4b6cf906b4331fe6e9302b Mon Sep 17 00:00:00 2001
From: Heming Zhao
Date: Tue, 9 Jul 2024 18:41:19 +0800
Subject: [PATCH 01/23] md-cluster: fix hanging issue while a new disk adding

The commit 1bbe254e4336 ("md-cluster: check for timeout while a new
disk adding") is syntactically correct but does not suit the real
clustered code logic. When a timeout occurs while adding a new disk,
if recv_daemon() bypasses the unlock of ack_lockres:CR, another node
will wait forever to grab the EX lock, and the cluster will hang
indefinitely.

How to fix:

1. In dlm_lock_sync(), change the wait behaviour from waiting forever
   to waiting with a timeout. This avoids the hang when another node
   fails to handle a cluster msg. A side effect of this change is that
   if another node receives an unknown msg (e.g. a new msg_type), the
   old code would hang, whereas the new code times out and fails. This
   helps cluster_md handle a new msg_type coming from nodes with
   different kernel/module versions (e.g. the user updates only one
   leg's kernel and monitors the stability of the new kernel).

2. __sendmsg() used to always return 0 (success) by design, because it
   must successfully unlock ->message_lockres. This commit makes the
   function return an error number when an error occurs.

Fixes: 1bbe254e4336 ("md-cluster: check for timeout while a new disk adding")
Signed-off-by: Heming Zhao
Reviewed-by: Su Yue
Acked-by: Yu Kuai
Signed-off-by: Song Liu
Link: https://lore.kernel.org/r/20240709104120.22243-1-heming.zhao@suse.com
---
 drivers/md/md-cluster.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 139fe2019c1d..f2bd37d72a46 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -15,6 +15,7 @@

 #define LVB_SIZE 64
 #define NEW_DEV_TIMEOUT 5000
+#define WAIT_DLM_LOCK_TIMEOUT (30 * HZ)

 struct dlm_lock_resource {
 	dlm_lockspace_t *ls;
@@ -130,8 +131,13 @@ static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
 			0, sync_ast, res, res->bast);
 	if (ret)
 		return ret;
-	wait_event(res->sync_locking, res->sync_locking_done);
+	ret = wait_event_timeout(res->sync_locking, res->sync_locking_done,
+				WAIT_DLM_LOCK_TIMEOUT);
 	res->sync_locking_done = false;
+	if (!ret) {
+		pr_err("locking DLM '%s' timeout!\n", res->name);
+		return -EBUSY;
+	}
 	if (res->lksb.sb_status == 0)
 		res->mode = mode;
 	return res->lksb.sb_status;
@@ -743,7 +749,7 @@ static void unlock_comm(struct md_cluster_info *cinfo)
  */
 static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
 {
-	int error;
+	int error, unlock_error;
 	int slot = cinfo->slot_number - 1;

 	cmsg->slot = cpu_to_le32(slot);
@@ -751,7 +757,7 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
 	error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
 	if (error) {
 		pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
-		goto failed_message;
+		return error;
 	}

 	memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
@@ -781,14 +787,10 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
 	}

 failed_ack:
-	error = dlm_unlock_sync(cinfo->message_lockres);
-	if (unlikely(error != 0)) {
+	while ((unlock_error = dlm_unlock_sync(cinfo->message_lockres)))
 		pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
-		       error);
-		/* in case the message can't be released due to some reason */
-		goto failed_ack;
-	}
-failed_message:
+			unlock_error);
+
 	return error;
 }
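The essence of change (1) is the bounded-wait pattern: never block forever on an acknowledgement that a remote peer may never deliver. A minimal user-space sketch of the same idea, using POSIX condition variables in place of the kernel's wait_event_timeout(); all names and the timeout value here are illustrative, not taken from the driver:

```
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done; /* stands in for res->sync_locking_done */

/* Bounded wait: 0 on completion, -EBUSY on timeout (instead of hanging). */
static int wait_ack(int timeout_sec)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!done && err == 0)
		err = pthread_cond_timedwait(&cond, &lock, &ts);
	done = 0; /* mirrors "res->sync_locking_done = false" */
	pthread_mutex_unlock(&lock);

	if (err == ETIMEDOUT) {
		fprintf(stderr, "peer did not ack in time\n");
		return -EBUSY; /* caller can fail the message, not deadlock */
	}
	return 0;
}

static void *peer(void *arg)
{
	(void)arg;
	sleep(1); /* simulate the remote ack; remove it to see the timeout path */
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, peer, NULL);
	printf("wait_ack -> %d\n", wait_ack(30));
	pthread_join(&t, NULL);
	return 0;
}
```

The loop around pthread_cond_timedwait() also absorbs spurious wakeups, the same reason wait_event_timeout() re-checks its condition.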
From 35a0a409fa269c287c4378f1aefe84ae8b5211a1 Mon Sep 17 00:00:00 2001
From: Heming Zhao
Date: Tue, 9 Jul 2024 18:41:20 +0800
Subject: [PATCH 02/23] md-cluster: fix no recovery job when adding/re-adding a disk

The commit db5e653d7c9f ("md: delay choosing sync action to
md_start_sync()") delays the start of the sync action. In a clustered
environment, this causes the other node to activate the spare disk
first and skip recovery. As a result, no node performs recovery when a
disk is added or re-added.

Before db5e653d7c9f:

```
node1                                node2
----------------------------------------------------------------
md_check_recovery
 + md_update_sb
 |  sendmsg: METADATA_UPDATED
 + md_choose_sync_action             process_metadata_update
 |  remove_and_add_spares            //node1 has not finished adding
 + call mddev->sync_work             //the spare disk:do nothing

md_start_sync
 starts md_do_sync

md_do_sync
 + grabbed resync_lockres:DLM_LOCK_EX
 + do syncing job

md_check_recovery
 sendmsg: METADATA_UPDATED
                                     process_metadata_update
                                      //activate spare disk

... ...

                                     md_do_sync
                                      waiting to grab resync_lockres:EX
```

After db5e653d7c9f:

(note: if 'cmd:idle' sets MD_RECOVERY_INTR after md_check_recovery
starts md_start_sync, setting the INTR action will exacerbate the delay
in node1 calling the md_do_sync function.)

```
node1                                node2
----------------------------------------------------------------
md_check_recovery
 + md_update_sb
 |  sendmsg: METADATA_UPDATED
 + calls mddev->sync_work            process_metadata_update
                                      //node1 has not finished adding
                                      //the spare disk:do nothing

md_start_sync
 + md_choose_sync_action
 |  remove_and_add_spares
 + calls md_do_sync                  md_check_recovery
                                      md_update_sb
                                       sendmsg: METADATA_UPDATED
                                     process_metadata_update
                                      //activate spare disk

... ...                              ... ...

md_do_sync
 + grabbed resync_lockres:EX
 + raid1_sync_request skip sync under
   conf->fullsync:0
                                     md_do_sync
                                      1. waiting to grab resync_lockres:EX
                                      2. when node1 could grab EX lock,
                                         node1 will skip resync under
                                         recovery_offset:MaxSector
```

How to trigger:

```(commands @node1)
# to easily watch the recovery status
echo 2000 > /proc/sys/dev/raid/speed_limit_max
ssh root@node2 "echo 2000 > /proc/sys/dev/raid/speed_limit_max"

mdadm -CR /dev/md0 -l1 -b clustered -n 2 /dev/sda /dev/sdb --assume-clean
ssh root@node2 mdadm -A /dev/md0 /dev/sda /dev/sdb
mdadm --manage /dev/md0 --fail /dev/sda --remove /dev/sda
mdadm --manage /dev/md0 --add /dev/sdc

=== "cat /proc/mdstat" on both nodes shows no recovery action. ===
```

How to fix: the md layer code logic is hard to restructure to speed up
the sync job on the local node, so add a new cluster msg which makes
the other node postpone activating the spare disk.

Signed-off-by: Heming Zhao
Reviewed-by: Su Yue
Acked-by: Yu Kuai
Signed-off-by: Song Liu
Link: https://lore.kernel.org/r/20240709104120.22243-2-heming.zhao@suse.com
---
 drivers/md/md-cluster.c | 27 +++++++++++++++++++++++++++
 drivers/md/md-cluster.h |  2 ++
 drivers/md/md.c         | 17 ++++++++++++++---
 3 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index f2bd37d72a46..6ce75c82969c 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -57,6 +57,7 @@ struct resync_info {
 #define MD_CLUSTER_ALREADY_IN_CLUSTER 6
 #define MD_CLUSTER_PENDING_RECV_EVENT 7
 #define MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD 8
+#define MD_CLUSTER_WAITING_FOR_SYNC 9

 struct md_cluster_info {
 	struct mddev *mddev; /* the md device which md_cluster_info belongs to */
@@ -92,6 +93,7 @@ struct md_cluster_info {
 	sector_t sync_hi;
 };

+/* For compatibility, add the new msg_type at the end.
*/ enum msg_type { METADATA_UPDATED = 0, RESYNCING, @@ -101,6 +103,7 @@ enum msg_type { BITMAP_NEEDS_SYNC, CHANGE_CAPACITY, BITMAP_RESIZE, + RESYNCING_START, }; struct cluster_msg { @@ -461,6 +464,7 @@ static void process_suspend_info(struct mddev *mddev, clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); remove_suspend_info(mddev, slot); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state); md_wakeup_thread(mddev->thread); return; } @@ -531,6 +535,7 @@ static int process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg) res = -1; } clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state); + set_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state); return res; } @@ -599,6 +604,9 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) case CHANGE_CAPACITY: set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); break; + case RESYNCING_START: + clear_bit(MD_CLUSTER_WAITING_FOR_SYNC, &mddev->cluster_info->state); + break; case RESYNCING: set_bit(MD_RESYNCING_REMOTE, &mddev->recovery); process_suspend_info(mddev, le32_to_cpu(msg->slot), @@ -1345,6 +1353,23 @@ static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi) spin_unlock_irq(&cinfo->suspend_lock); } +static int resync_status_get(struct mddev *mddev) +{ + struct md_cluster_info *cinfo = mddev->cluster_info; + + return test_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state); +} + +static int resync_start_notify(struct mddev *mddev) +{ + struct md_cluster_info *cinfo = mddev->cluster_info; + struct cluster_msg cmsg = {0}; + + cmsg.type = cpu_to_le32(RESYNCING_START); + + return sendmsg(cinfo, &cmsg, 0); +} + static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; @@ -1579,6 +1604,8 @@ static const struct md_cluster_operations cluster_ops = { .resync_start = resync_start, .resync_finish = resync_finish, .resync_info_update = resync_info_update, + .resync_start_notify = resync_start_notify, + .resync_status_get = resync_status_get, .resync_info_get = resync_info_get, .metadata_update_start = metadata_update_start, .metadata_update_finish = metadata_update_finish, diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index a78e3021775d..470bf18ffde5 100644 --- a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h @@ -14,6 +14,8 @@ struct md_cluster_operations { int (*leave)(struct mddev *mddev); int (*slot_number)(struct mddev *mddev); int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi); + int (*resync_start_notify)(struct mddev *mddev); + int (*resync_status_get)(struct mddev *mddev); void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi); int (*metadata_update_start)(struct mddev *mddev); int (*metadata_update_finish)(struct mddev *mddev); diff --git a/drivers/md/md.c b/drivers/md/md.c index 64693913ed18..d3a837506a36 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8978,7 +8978,8 @@ void md_do_sync(struct md_thread *thread) * This will mean we have to start checking from the beginning again. 
* */ - + if (mddev_is_clustered(mddev)) + md_cluster_ops->resync_start_notify(mddev); do { int mddev2_minor = -1; mddev->curr_resync = MD_RESYNC_DELAYED; @@ -9992,8 +9993,18 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) */ if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE && !(le32_to_cpu(sb->feature_map) & - MD_FEATURE_RESHAPE_ACTIVE)) { - rdev2->saved_raid_disk = role; + MD_FEATURE_RESHAPE_ACTIVE) && + !md_cluster_ops->resync_status_get(mddev)) { + /* + * -1 to make raid1_add_disk() set conf->fullsync + * to 1. This could avoid skipping sync when the + * remote node is down during resyncing. + */ + if ((le32_to_cpu(sb->feature_map) + & MD_FEATURE_RECOVERY_OFFSET)) + rdev2->saved_raid_disk = -1; + else + rdev2->saved_raid_disk = role; ret = remove_and_add_spares(mddev, rdev2); pr_info("Activated spare: %pg\n", rdev2->bdev); From 36a5c03f232719eb4e2d925f4d584e09cfaf372c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Jo=C5=84czyk?= Date: Thu, 11 Jul 2024 22:23:16 +0200 Subject: [PATCH 03/23] md/raid1: set max_sectors during early return from choose_slow_rdev() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Linux 6.9+ is unable to start a degraded RAID1 array with one drive, when that drive has a write-mostly flag set. During such an attempt, the following assertion in bio_split() is hit: BUG_ON(sectors <= 0); Call Trace: ? bio_split+0x96/0xb0 ? exc_invalid_op+0x53/0x70 ? bio_split+0x96/0xb0 ? asm_exc_invalid_op+0x1b/0x20 ? bio_split+0x96/0xb0 ? raid1_read_request+0x890/0xd20 ? __call_rcu_common.constprop.0+0x97/0x260 raid1_make_request+0x81/0xce0 ? __get_random_u32_below+0x17/0x70 ? new_slab+0x2b3/0x580 md_handle_request+0x77/0x210 md_submit_bio+0x62/0xa0 __submit_bio+0x17b/0x230 submit_bio_noacct_nocheck+0x18e/0x3c0 submit_bio_noacct+0x244/0x670 After investigation, it turned out that choose_slow_rdev() does not set the value of max_sectors in some cases and because of it, raid1_read_request calls bio_split with sectors == 0. Fix it by filling in this variable. This bug was introduced in commit dfa8ecd167c1 ("md/raid1: factor out choose_slow_rdev() from read_balance()") but apparently hidden until commit 0091c5a269ec ("md/raid1: factor out helpers to choose the best rdev from read_balance()") shortly thereafter. Cc: stable@vger.kernel.org # 6.9.x+ Signed-off-by: Mateusz Jończyk Fixes: dfa8ecd167c1 ("md/raid1: factor out choose_slow_rdev() from read_balance()") Cc: Song Liu Cc: Yu Kuai Cc: Paul Luse Cc: Xiao Ni Cc: Mariusz Tkaczyk Link: https://lore.kernel.org/linux-raid/20240706143038.7253-1-mat.jonczyk@o2.pl/ -- Tested on both Linux 6.10 and 6.9.8. Inside a VM, mdadm testsuite for RAID1 on 6.10 did not find any problems: ./test --dev=loop --no-error --raidtype=raid1 (on 6.9.8 there was one failure, caused by external bitmap support not compiled in). Notes: - I was reliably getting deadlocks when adding / removing devices on such an array - while the array was loaded with fsstress with 20 concurrent processes. When the array was idle or loaded with fsstress with 8 processes, no such deadlocks happened in my tests. This occurred also on unpatched Linux 6.8.0 though, but not on 6.1.97-rc1, so this is likely an independent regression (to be investigated). - I was also getting deadlocks when adding / removing the bitmap on the array in similar conditions - this happened on Linux 6.1.97-rc1 also though. fsstress with 8 concurrent processes did cause it only once during many tests. 
- in my testing, there was once a problem with hot adding an internal bitmap to the array: mdadm: Cannot add bitmap while array is resyncing or reshaping etc. mdadm: failed to set internal bitmap. even though no such reshaping was happening according to /proc/mdstat. This seems unrelated, though. Reviewed-by: Yu Kuai Signed-off-by: Song Liu Link: https://lore.kernel.org/r/20240711202316.10775-1-mat.jonczyk@o2.pl --- drivers/md/raid1.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 04a0c2ca1732..7acfe7c9dc8d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -680,6 +680,7 @@ static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio, len = r1_bio->sectors; read_len = raid1_check_read_range(rdev, this_sector, &len); if (read_len == r1_bio->sectors) { + *max_sectors = read_len; update_read_sectors(conf, disk, this_sector, read_len); return disk; } From 1f5a33315362cb8ade2b15489c985ada0cc8623b Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Mon, 15 Jul 2024 13:24:33 +0200 Subject: [PATCH 04/23] s390/dasd: add missing MODULE_DESCRIPTION() macros With ARCH=s390, make allmodconfig && make W=1 C=1 reports: WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/s390/block/dasd_diag_mod.o WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/s390/block/dasd_eckd_mod.o WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/s390/block/dasd_fba_mod.o Add the missing invocations of the MODULE_DESCRIPTION() macro. Signed-off-by: Jeff Johnson Signed-off-by: Stefan Haberland Link: https://lore.kernel.org/r/20240715112434.2111291-2-sth@linux.ibm.com Signed-off-by: Jens Axboe --- drivers/s390/block/dasd_diag.c | 1 + drivers/s390/block/dasd_eckd.c | 1 + drivers/s390/block/dasd_fba.c | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index ea4b1d01bb76..8245b742e4a2 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -29,6 +29,7 @@ #include "dasd_int.h" #include "dasd_diag.h" +MODULE_DESCRIPTION("S/390 Support for DIAG access to DASD Disks"); MODULE_LICENSE("GPL"); /* The maximum number of blocks per request (max_blocks) is dependent on the diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 2f16f543079b..f8113974cfba 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -44,6 +44,7 @@ /* 64k are 128 x 512 byte sectors */ #define DASD_RAW_SECTORS_PER_TRACK 128 +MODULE_DESCRIPTION("S/390 DASD ECKD Disks device driver"); MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_eckd_discipline; diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 361e9bd75257..9ef7b168aba8 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -32,6 +32,7 @@ #define DASD_FBA_CCW_LOCATE 0x43 #define DASD_FBA_CCW_DEFINE_EXTENT 0x63 +MODULE_DESCRIPTION("S/390 DASD FBA Disks device driver"); MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; From 8e64d2356cbc800b4cd0e3e614797f76bcf0cdb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20L=C3=B3pez?= Date: Mon, 15 Jul 2024 13:24:34 +0200 Subject: [PATCH 05/23] s390/dasd: fix error checks in dasd_copy_pair_store() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit dasd_add_busid() can return an error via ERR_PTR() if an allocation fails. 
However, two callsites in dasd_copy_pair_store() do not check the result, potentially resulting in a NULL pointer dereference. Fix this by checking the result with IS_ERR() and returning the error up the stack. Fixes: a91ff09d39f9b ("s390/dasd: add copy pair setup") Signed-off-by: Carlos López Signed-off-by: Stefan Haberland Link: https://lore.kernel.org/r/20240715112434.2111291-3-sth@linux.ibm.com Signed-off-by: Jens Axboe --- drivers/s390/block/dasd_devmap.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 0316c20823ee..6adaeb985dde 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -2248,13 +2248,19 @@ static ssize_t dasd_copy_pair_store(struct device *dev, /* allocate primary devmap if needed */ prim_devmap = dasd_find_busid(prim_busid); - if (IS_ERR(prim_devmap)) + if (IS_ERR(prim_devmap)) { prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT); + if (IS_ERR(prim_devmap)) + return PTR_ERR(prim_devmap); + } /* allocate secondary devmap if needed */ sec_devmap = dasd_find_busid(sec_busid); - if (IS_ERR(sec_devmap)) + if (IS_ERR(sec_devmap)) { sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT); + if (IS_ERR(sec_devmap)) + return PTR_ERR(sec_devmap); + } /* setting copy relation is only allowed for offline secondary */ if (sec_devmap->device) From 6b3789e6c5310a8f517796b0f4a11039f9e5cf8f Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:28:58 +0000 Subject: [PATCH 06/23] block: Add missing entries from cmd_flag_name[] Add missing entries for req_flag_bits. Reviewed-by: Bart Van Assche Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-2-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 344f9e503bdb..786fa4d6e019 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -223,8 +223,13 @@ static const char *const cmd_flag_name[] = { CMD_FLAG_NAME(RAHEAD), CMD_FLAG_NAME(BACKGROUND), CMD_FLAG_NAME(NOWAIT), - CMD_FLAG_NAME(NOUNMAP), CMD_FLAG_NAME(POLLED), + CMD_FLAG_NAME(ALLOC_CACHE), + CMD_FLAG_NAME(SWAP), + CMD_FLAG_NAME(DRV), + CMD_FLAG_NAME(FS_PRIVATE), + CMD_FLAG_NAME(ATOMIC), + CMD_FLAG_NAME(NOUNMAP), }; #undef CMD_FLAG_NAME From af54963f193533dd7c1fe8f3d4e7af18de2406d8 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:28:59 +0000 Subject: [PATCH 07/23] block: Add zone write plugging entry to rqf_name[] Add missing entry. Reviewed-by: Bart Van Assche Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-3-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 786fa4d6e019..49d4f6e0a719 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -248,6 +248,7 @@ static const char *const rqf_name[] = { RQF_NAME(HASHED), RQF_NAME(STATS), RQF_NAME(SPECIAL_PAYLOAD), + RQF_NAME(ZONE_WRITE_PLUGGING), RQF_NAME(TIMED_OUT), RQF_NAME(RESV), }; From 1c83c5375e2f1bc7b59fa3ec5aa1e5909ec8710c Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:29:00 +0000 Subject: [PATCH 08/23] block: Add missing entry to hctx_flag_name[] Add missing entry for NO_SCHED_BY_DEFAULT and reorder to match the enum. 
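For context on why reordering is only cosmetic here: the debugfs name tables are keyed by bit position through designated initializers, so a reordered entry still lands in the right slot, but a missing entry leaves a silent hole in the table. A small user-space sketch of the pattern, with hypothetical DEMO_F_x flags and a tiny constant-expression ilog2 standing in for the kernel's (the kernel's HCTX_FLAG_NAME macro uses the same index-by-bit trick):

```
#include <stdio.h>

/* Hypothetical stand-ins for the BLK_MQ_F_x flags (not the kernel values) */
enum {
	DEMO_F_SHOULD_MERGE = 1 << 0,
	DEMO_F_STACKING     = 1 << 1,
	DEMO_F_BLOCKING     = 1 << 2,
	DEMO_F_NO_SCHED     = 1 << 3,
};

/* const-expression ilog2, enough for four flags */
#define DEMO_ILOG2(x) ((x) >= 8 ? 3 : (x) >= 4 ? 2 : (x) >= 2 ? 1 : 0)
#define DEMO_FLAG_NAME(name) [DEMO_ILOG2(DEMO_F_##name)] = #name

/*
 * Keyed by bit index, so source order is cosmetic, but a missing entry
 * leaves a silent NULL hole rather than a compile error.
 */
static const char *const demo_flag_name[] = {
	DEMO_FLAG_NAME(SHOULD_MERGE),
	DEMO_FLAG_NAME(STACKING),
	DEMO_FLAG_NAME(BLOCKING),
	DEMO_FLAG_NAME(NO_SCHED),
};

/* Print the name of each set bit, like the debugfs flag dumps do */
static void demo_flags_show(unsigned long flags)
{
	unsigned long i;

	for (i = 0; i < sizeof(demo_flag_name) / sizeof(demo_flag_name[0]); i++)
		if ((flags & (1UL << i)) && demo_flag_name[i])
			printf("%s ", demo_flag_name[i]);
	printf("\n");
}

int main(void)
{
	demo_flags_show(DEMO_F_BLOCKING | DEMO_F_NO_SCHED); /* "BLOCKING NO_SCHED" */
	return 0;
}
```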
Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-4-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 49d4f6e0a719..5f53796bd6e2 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -181,10 +181,11 @@ static const char *const alloc_policy_name[] = { static const char *const hctx_flag_name[] = { HCTX_FLAG_NAME(SHOULD_MERGE), HCTX_FLAG_NAME(TAG_QUEUE_SHARED), - HCTX_FLAG_NAME(BLOCKING), - HCTX_FLAG_NAME(NO_SCHED), HCTX_FLAG_NAME(STACKING), HCTX_FLAG_NAME(TAG_HCTX_SHARED), + HCTX_FLAG_NAME(BLOCKING), + HCTX_FLAG_NAME(NO_SCHED), + HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT), }; #undef HCTX_FLAG_NAME From c8f51feee135f37f0d77b4616083c25524daa7b0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 19 Jul 2024 11:29:01 +0000 Subject: [PATCH 09/23] block: remove QUEUE_FLAG_STOPPED QUEUE_FLAG_STOPPED is entirely unused. Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Reviewed-by: Bart Van Assche Reviewed-by: Johannes Thumshirn Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-5-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - include/linux/blkdev.h | 2 -- 2 files changed, 3 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 5f53796bd6e2..866e8c6bebd0 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -79,7 +79,6 @@ static int queue_pm_only_show(void *data, struct seq_file *m) #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name static const char *const blk_queue_flag_name[] = { - QUEUE_FLAG_NAME(STOPPED), QUEUE_FLAG_NAME(DYING), QUEUE_FLAG_NAME(NOMERGES), QUEUE_FLAG_NAME(SAME_COMP), diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index dce4a6bf7307..942ad4e0f231 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -588,7 +588,6 @@ struct request_queue { }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ -#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ #define QUEUE_FLAG_DYING 1 /* queue being torn down */ #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ #define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ @@ -608,7 +607,6 @@ struct request_queue { void blk_queue_flag_set(unsigned int flag, struct request_queue *q); void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); -#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) From 3dff6155733f25872530ad358c6f5559800f4ccb Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:29:02 +0000 Subject: [PATCH 10/23] block: Relocate BLK_MQ_CPU_WORK_BATCH BLK_MQ_CPU_WORK_BATCH is defined in include/linux/blk-mq.h, but only used in blk-mq.c, so relocate to block/blk-mq.h Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-6-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq.h | 2 ++ include/linux/blk-mq.h | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.h b/block/blk-mq.h index 260beea8e332..3bd43b10032f 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -36,6 +36,8 @@ enum { BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 
1,
 };

+#define BLK_MQ_CPU_WORK_BATCH (8)
+
 typedef unsigned int __bitwise blk_insert_t;
 #define BLK_MQ_INSERT_AT_HEAD ((__force blk_insert_t)0x01)

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 89ba6b16fe8b..df775a203a4d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -672,8 +672,6 @@ enum {
 	BLK_MQ_S_INACTIVE = 3,

 	BLK_MQ_MAX_DEPTH = 10240,
-
-	BLK_MQ_CPU_WORK_BATCH = 8,
 };
 #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
 	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \

From 793356d23f8a817e164a917c792741a6d6d651ed Mon Sep 17 00:00:00 2001
From: John Garry
Date: Fri, 19 Jul 2024 11:29:03 +0000
Subject: [PATCH 11/23] block: Relocate BLK_MQ_MAX_DEPTH

BLK_MQ_MAX_DEPTH is defined as an enumerated value, but has no real
relation to the other members in its enum, so just use #define to
provide the definition.

Signed-off-by: John Garry
Link: https://lore.kernel.org/r/20240719112912.3830443-7-john.g.garry@oracle.com
Signed-off-by: Jens Axboe
---
 include/linux/blk-mq.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index df775a203a4d..8a84f49468d5 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -670,8 +670,6 @@ enum {

 	/* hw queue is inactive after all its CPUs become offline */
 	BLK_MQ_S_INACTIVE = 3,
-
-	BLK_MQ_MAX_DEPTH = 10240,
 };
@@ -680,6 +678,7 @@ enum {
 	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

+#define BLK_MQ_MAX_DEPTH (10240)
 #define BLK_MQ_NO_HCTX_IDX (-1U)

 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,

From 55177adf1837bc56f878f7f6f7123947a2088148 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Fri, 19 Jul 2024 11:29:04 +0000
Subject: [PATCH 12/23] block: Make QUEUE_FLAG_x as an enum

This will allow us to keep better in sync with blk_queue_flag_name[].
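The value of the conversion is the pattern it enables: a dense, compiler-maintained numbering with a trailing sentinel that always equals the flag count. A condensed sketch with an illustrative flag subset, not the full kernel set:

```
/* Dense bit indexes: the compiler renumbers automatically on insertion. */
enum {
	DEMO_QUEUE_FLAG_DYING,
	DEMO_QUEUE_FLAG_NOMERGES,
	DEMO_QUEUE_FLAG_FAIL_IO,
	DEMO_QUEUE_FLAG_MAX	/* sentinel: always equals the flag count */
};

/*
 * Implicitly sized: a forgotten tail entry shrinks the array, which a
 * later BUILD_BUG_ON() against the sentinel can catch at compile time.
 */
static const char *const demo_queue_flag_name[] = {
	[DEMO_QUEUE_FLAG_DYING]    = "DYING",
	[DEMO_QUEUE_FLAG_NOMERGES] = "NOMERGES",
	[DEMO_QUEUE_FLAG_FAIL_IO]  = "FAIL_IO",
};
```

Renumbering on insertion is safe in this case because the queue flags are purely in-kernel state, not ABI.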
Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-8-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 942ad4e0f231..ded46fad67a0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -588,19 +588,22 @@ struct request_queue { }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ -#define QUEUE_FLAG_DYING 1 /* queue being torn down */ -#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ -#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ -#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ -#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ -#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ -#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ -#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ -#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ -#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ -#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ -#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ -#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ +enum { + QUEUE_FLAG_DYING, /* queue being torn down */ + QUEUE_FLAG_NOMERGES, /* disable merge attempts */ + QUEUE_FLAG_SAME_COMP, /* complete on same CPU-group */ + QUEUE_FLAG_FAIL_IO, /* fake timeout */ + QUEUE_FLAG_NOXMERGES, /* No extended merges */ + QUEUE_FLAG_SAME_FORCE, /* force complete on same CPU */ + QUEUE_FLAG_INIT_DONE, /* queue is initialized */ + QUEUE_FLAG_STATS, /* track IO start and completion times */ + QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */ + QUEUE_FLAG_QUIESCED, /* queue has been quiesced */ + QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */ + QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */ + QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */ + QUEUE_FLAG_MAX +}; #define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP) From cce496de061d09794825b7c7c7d57faca4772d82 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:29:05 +0000 Subject: [PATCH 13/23] block: Catch possible entries missing from blk_queue_flag_name[] Assert that we are not missing flag entries in blk_queue_flag_name[]. Signed-off-by: John Garry Reviewed-by: Bart Van Assche Link: https://lore.kernel.org/r/20240719112912.3830443-9-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 866e8c6bebd0..d28784c1957f 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -5,6 +5,7 @@ #include #include +#include #include #include "blk.h" @@ -99,6 +100,7 @@ static int queue_state_show(void *data, struct seq_file *m) { struct request_queue *q = data; + BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX); blk_flags_show(m, q->queue_flags, blk_queue_flag_name, ARRAY_SIZE(blk_queue_flag_name)); seq_puts(m, "\n"); From 23827310cce7eff3477aeaeb59ea3718f5c9c633 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:29:06 +0000 Subject: [PATCH 14/23] block: Catch possible entries missing from hctx_state_name[] Add a build-time assert that we are not missing entries from hctx_state_name[]. 
For this, create a separate enum for state flags and add a "max" entry
for the BLK_MQ_S_x flags. Those enum values take the default numbering,
so there is no need to number them explicitly.

Signed-off-by: John Garry
Link: https://lore.kernel.org/r/20240719112912.3830443-10-john.g.garry@oracle.com
Signed-off-by: Jens Axboe
---
 block/blk-mq-debugfs.c |  1 +
 include/linux/blk-mq.h | 17 ++++++++++-------
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index d28784c1957f..85be8aa39b90 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -165,6 +165,7 @@ static int hctx_state_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;

+	BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX);
 	blk_flags_show(m, hctx->state, hctx_state_name,
 		       ARRAY_SIZE(hctx_state_name));
 	seq_puts(m, "\n");

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8a84f49468d5..4905a1d67551 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -663,13 +663,6 @@ enum {
 	BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
-
-	BLK_MQ_S_STOPPED = 0,
-	BLK_MQ_S_TAG_ACTIVE = 1,
-	BLK_MQ_S_SCHED_RESTART = 2,
-
-	/* hw queue is inactive after all its CPUs become offline */
-	BLK_MQ_S_INACTIVE = 3,
 };
 #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
 	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
@@ -681,6 +674,16 @@ enum {
 #define BLK_MQ_MAX_DEPTH (10240)
 #define BLK_MQ_NO_HCTX_IDX (-1U)

+enum {
+	/* Keep hctx_state_name[] in sync with the definitions below */
+	BLK_MQ_S_STOPPED,
+	BLK_MQ_S_TAG_ACTIVE,
+	BLK_MQ_S_SCHED_RESTART,
+	/* hw queue is inactive after all its CPUs become offline */
+	BLK_MQ_S_INACTIVE,
+	BLK_MQ_S_MAX
+};
+
 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
 		struct queue_limits *lim, void *queuedata,
 		struct lock_class_key *lkclass);

From 226f0f6afc3e5c8903c6e57e1f6073ad8ad189b5 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Fri, 19 Jul 2024 11:29:07 +0000
Subject: [PATCH 15/23] block: Catch possible entries missing from hctx_flag_name[]

Refresh the values in the BLK_MQ_F_x enum, and then rearrange the
members in hctx_flag_name[] to match that enum. Renumber
BLK_MQ_F_ALLOC_POLICY_START_BIT to match the value refresh.

Add a BUILD_BUG_ON() call to ensure that we are not missing entries in
hctx_flag_name[].
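The shape of the check is worth spelling out: with designated initializers, an implicitly sized array's length is its highest index plus one, so comparing ARRAY_SIZE() against the enum sentinel catches a forgotten tail entry at compile time. A user-space sketch with hypothetical names, where BUILD_BUG_ON stands in for the kernel macro:

```
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* User-space stand-in: a negative array size makes compilation fail. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

enum {
	DEMO_S_STOPPED,
	DEMO_S_TAG_ACTIVE,
	DEMO_S_SCHED_RESTART,
	DEMO_S_INACTIVE,
	DEMO_S_MAX
};

static const char *const demo_state_name[] = {
	[DEMO_S_STOPPED]       = "STOPPED",
	[DEMO_S_TAG_ACTIVE]    = "TAG_ACTIVE",
	[DEMO_S_SCHED_RESTART] = "SCHED_RESTART",
	[DEMO_S_INACTIVE]      = "INACTIVE", /* delete me: the build breaks */
};

int main(void)
{
	/* Trips at compile time if the table and the enum ever drift apart */
	BUILD_BUG_ON(ARRAY_SIZE(demo_state_name) != DEMO_S_MAX);
	printf("%zu state names\n", ARRAY_SIZE(demo_state_name));
	return 0;
}
```

One limit worth knowing: a hole in the middle of the table still compiles, since the highest index fixes the implicit size, so the assert mainly guards against forgotten tail entries.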
Signed-off-by: John Garry
Link: https://lore.kernel.org/r/20240719112912.3830443-11-john.g.garry@oracle.com
Signed-off-by: Jens Axboe
---
 block/blk-mq-debugfs.c |  3 +++
 include/linux/blk-mq.h | 10 ++++++----
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 85be8aa39b90..8618aa07ba2d 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -196,6 +196,9 @@ static int hctx_flags_show(void *data, struct seq_file *m)
 	struct blk_mq_hw_ctx *hctx = data;
 	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

+	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
+			BLK_MQ_F_ALLOC_POLICY_START_BIT);
+
 	seq_puts(m, "alloc_policy=");
 	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
 	    alloc_policy_name[alloc_policy])

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 4905a1d67551..27241009c8f9 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -644,6 +644,7 @@ struct blk_mq_ops {
 #endif
 };

+/* Keep hctx_flag_name[] in sync with the definitions below */
 enum {
 	BLK_MQ_F_SHOULD_MERGE = 1 << 0,
 	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
@@ -653,15 +654,16 @@ enum {
 	 */
 	BLK_MQ_F_STACKING = 1 << 2,
 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
-	BLK_MQ_F_BLOCKING = 1 << 5,
+	BLK_MQ_F_BLOCKING = 1 << 4,
 	/* Do not allow an I/O scheduler to be configured. */
-	BLK_MQ_F_NO_SCHED = 1 << 6,
+	BLK_MQ_F_NO_SCHED = 1 << 5,
+
 	/*
 	 * Select 'none' during queue registration in case of a single hwq
 	 * or shared hwqs instead of 'mq-deadline'.
 	 */
-	BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
-	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
+	BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6,
+	BLK_MQ_F_ALLOC_POLICY_START_BIT = 7,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 };
 #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \

From 26d3bdb57ec3fa56eaf8d2e74b5d488e55f43013 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Fri, 19 Jul 2024 11:29:08 +0000
Subject: [PATCH 16/23] block: Catch possible entries missing from alloc_policy_name[]

Make BLK_TAG_ALLOC_x an enum and add a "max" entry.

Add a BUILD_BUG_ON() call to ensure that we are not missing entries in
alloc_policy_name[].
Reviewed-by: Bart Van Assche Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-12-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 + include/linux/blk-mq.h | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 8618aa07ba2d..312e8a40caad 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -198,6 +198,7 @@ static int hctx_flags_show(void *data, struct seq_file *m) BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) != BLK_MQ_F_ALLOC_POLICY_START_BIT); + BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX); seq_puts(m, "alloc_policy="); if (alloc_policy < ARRAY_SIZE(alloc_policy_name) && diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 27241009c8f9..a64a50a0edf7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -278,8 +278,12 @@ enum blk_eh_timer_return { BLK_EH_RESET_TIMER, }; -#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ -#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ +/* Keep alloc_policy_name[] in sync with the definitions below */ +enum { + BLK_TAG_ALLOC_FIFO, /* allocate starting from 0 */ + BLK_TAG_ALLOC_RR, /* allocate starting from last allocated tag */ + BLK_TAG_ALLOC_MAX +}; /** * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware From 6fa99325ec86bcd442363d77561a1babd8d9a427 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:29:09 +0000 Subject: [PATCH 17/23] block: Catch possible entries missing from cmd_flag_name[] Add a BUILD_BUG_ON() call to ensure that we are not missing entries in cmd_flag_name[]. Reviewed-by: Bart Van Assche Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-13-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 2 ++ include/linux/blk_types.h | 1 + 2 files changed, 3 insertions(+) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 312e8a40caad..a4accd79c225 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -281,6 +281,8 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) const enum req_op op = req_op(rq); const char *op_str = blk_op_str(op); + BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS); + seq_printf(m, "%p {.op=", rq); if (strcmp(op_str, "UNKNOWN") == 0) seq_printf(m, "%u", op); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 632edd71f8c6..36ed96133217 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -354,6 +354,7 @@ enum req_op { REQ_OP_LAST = (__force blk_opf_t)36, }; +/* Keep cmd_flag_name[] in sync with the definitions below */ enum req_flag_bits { __REQ_FAILFAST_DEV = /* no driver retries of device errors */ REQ_OP_BITS, From 5f89154e8e9e3445f9b592e58a7045e06153b822 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 19 Jul 2024 11:29:10 +0000 Subject: [PATCH 18/23] block: Use enum to define RQF_x bit indexes Similar to what we do for enum req_flag_bits, divide the definition of RQF_x flags into an enum to declare the bits and an actual flag. Tweak some comments to not spill onto new lines. 
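A condensed sketch of the resulting pattern, with three illustrative flags only (the kernel additionally wraps the masks in __force/__bitwise typing for sparse, which is omitted here):

```
typedef unsigned int demo_req_flags_t;

enum {
	__DEMO_RQF_STARTED,	/* bit positions, numbered by the compiler */
	__DEMO_RQF_QUIET,
	__DEMO_RQF_TIMED_OUT,
	__DEMO_RQF_BITS		/* bounds the name table below */
};

/* The masks used by code stay as macros, built from the bit indexes */
#define DEMO_RQF_STARTED   ((demo_req_flags_t)(1 << __DEMO_RQF_STARTED))
#define DEMO_RQF_QUIET     ((demo_req_flags_t)(1 << __DEMO_RQF_QUIET))
#define DEMO_RQF_TIMED_OUT ((demo_req_flags_t)(1 << __DEMO_RQF_TIMED_OUT))

/* With explicit bit indexes, a name table needs no ilog2() (see patch 19) */
#define DEMO_RQF_NAME(name) [__DEMO_RQF_##name] = #name
static const char *const demo_rqf_name[] = {
	DEMO_RQF_NAME(STARTED),
	DEMO_RQF_NAME(QUIET),
	DEMO_RQF_NAME(TIMED_OUT),
};
```

Keeping the masks as macros rather than enum members preserves their distinct flag type, while the enum gives debug code and compile-time checks a plain bit index to work with.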
Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240719112912.3830443-14-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- include/linux/blk-mq.h | 86 ++++++++++++++++++++++++++---------------- 1 file changed, 54 insertions(+), 32 deletions(-) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a64a50a0edf7..af52ec6a1ed5 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -27,38 +27,60 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t); * request flags */ typedef __u32 __bitwise req_flags_t; -/* drive already may have started this one */ -#define RQF_STARTED ((__force req_flags_t)(1 << 1)) -/* request for flush sequence */ -#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) -/* merge of different types, fail separately */ -#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) -/* don't call prep for this one */ -#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) -/* use hctx->sched_tags */ -#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << 8)) -/* use an I/O scheduler for this request */ -#define RQF_USE_SCHED ((__force req_flags_t)(1 << 9)) -/* vaguely specified driver internal error. Ignored by the block layer */ -#define RQF_FAILED ((__force req_flags_t)(1 << 10)) -/* don't warn about errors */ -#define RQF_QUIET ((__force req_flags_t)(1 << 11)) -/* account into disk and partition IO statistics */ -#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) -/* runtime pm request */ -#define RQF_PM ((__force req_flags_t)(1 << 15)) -/* on IO scheduler merge hash */ -#define RQF_HASHED ((__force req_flags_t)(1 << 16)) -/* track IO completion time */ -#define RQF_STATS ((__force req_flags_t)(1 << 17)) -/* Look at ->special_vec for the actual data payload instead of the - bio chain. */ -#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -/* The request completion needs to be signaled to zone write pluging. */ -#define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20)) -/* ->timeout has been called, don't expire again */ -#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) -#define RQF_RESV ((__force req_flags_t)(1 << 23)) +enum { + /* drive already may have started this one */ + __RQF_STARTED, + /* request for flush sequence */ + __RQF_FLUSH_SEQ, + /* merge of different types, fail separately */ + __RQF_MIXED_MERGE, + /* don't call prep for this one */ + __RQF_DONTPREP, + /* use hctx->sched_tags */ + __RQF_SCHED_TAGS, + /* use an I/O scheduler for this request */ + __RQF_USE_SCHED, + /* vaguely specified driver internal error. Ignored by block layer */ + __RQF_FAILED, + /* don't warn about errors */ + __RQF_QUIET, + /* account into disk and partition IO statistics */ + __RQF_IO_STAT, + /* runtime pm request */ + __RQF_PM, + /* on IO scheduler merge hash */ + __RQF_HASHED, + /* track IO completion time */ + __RQF_STATS, + /* Look at ->special_vec for the actual data payload instead of the + bio chain. */ + __RQF_SPECIAL_PAYLOAD, + /* request completion needs to be signaled to zone write plugging. 
*/
+	__RQF_ZONE_WRITE_PLUGGING,
+	/* ->timeout has been called, don't expire again */
+	__RQF_TIMED_OUT,
+	__RQF_RESV,
+	__RQF_BITS
+};
+
+#define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED))
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
+#define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP))
+#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
+#define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED))
+#define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED))
+#define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET))
+#define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT))
+#define RQF_PM ((__force req_flags_t)(1 << __RQF_PM))
+#define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED))
+#define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS))
+#define RQF_SPECIAL_PAYLOAD \
+	((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
+#define RQF_ZONE_WRITE_PLUGGING \
+	((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT))
+#define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV))

 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \

From 2d61a6c2ca7aadce3771f81a3624848f97dcc39e Mon Sep 17 00:00:00 2001
From: John Garry
Date: Fri, 19 Jul 2024 11:29:11 +0000
Subject: [PATCH 19/23] block: Simplify definition of RQF_NAME()

Now that we have a bit index for each RQF_x flag in __RQF_x, use
__RQF_x to simplify the definition of RQF_NAME() by dropping the
ilog2((__force u32)RQF_x) computation.

Reviewed-by: Bart Van Assche
Signed-off-by: John Garry
Link: https://lore.kernel.org/r/20240719112912.3830443-15-john.g.garry@oracle.com
Signed-off-by: Jens Axboe
---
 block/blk-mq-debugfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index a4accd79c225..34ed099c3429 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -240,7 +240,7 @@ static const char *const cmd_flag_name[] = {
 };
 #undef CMD_FLAG_NAME

-#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
+#define RQF_NAME(name) [__RQF_##name] = #name
 static const char *const rqf_name[] = {
 	RQF_NAME(STARTED),
 	RQF_NAME(FLUSH_SEQ),

From 8a47e33f50dd779f94bc277c6d3de81672463c5e Mon Sep 17 00:00:00 2001
From: John Garry
Date: Fri, 19 Jul 2024 11:29:12 +0000
Subject: [PATCH 20/23] block: Catch possible entries missing from rqf_name[]

Add a BUILD_BUG_ON() call to ensure that we are not missing entries in
rqf_name[].
Reviewed-by: Bart Van Assche
Signed-off-by: John Garry
Link: https://lore.kernel.org/r/20240719112912.3830443-16-john.g.garry@oracle.com
Signed-off-by: Jens Axboe
---
 block/blk-mq-debugfs.c | 1 +
 include/linux/blk-mq.h | 1 +
 2 files changed, 2 insertions(+)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 34ed099c3429..5463697a8442 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -282,6 +282,7 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 	const char *op_str = blk_op_str(op);

 	BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS);
+	BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS);

 	seq_printf(m, "%p {.op=", rq);
 	if (strcmp(op_str, "UNKNOWN") == 0)

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index af52ec6a1ed5..8d304b1d16b1 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -27,6 +27,7 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
  * request flags */
 typedef __u32 __bitwise req_flags_t;

+/* Keep rqf_name[] in sync with the definitions below */
 enum {
 	/* drive already may have started this one */
 	__RQF_STARTED,

From 73e59d3eeca4feaf0814a077df8ec5edc53ccf77 Mon Sep 17 00:00:00 2001
From: hexue
Date: Thu, 18 Jul 2024 15:08:17 +0800
Subject: [PATCH 21/23] block: avoid polling configuration errors

This patch adds a poll queue check, aiming to help users use polled IO
correctly.

If users do polled IO but the device doesn't have poll queues, they
will get suboptimal performance data and waste CPU resources. Add a
poll queue check to catch this. If users don't have the device properly
configured, or if it simply doesn't support polled IO, it will error
the IO with -EOPNOTSUPP. This is similar to what we used to do for
sync polled IO, which is no longer supported.

Signed-off-by: hexue
Link: https://lore.kernel.org/r/20240718070817.1031494-1-xue01.he@samsung.com
Signed-off-by: Jens Axboe
---
 block/blk-core.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 02bceeb36f2c..1217c2cd66dd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -791,8 +791,11 @@ void submit_bio_noacct(struct bio *bio)
 		}
 	}

-	if (!(q->limits.features & BLK_FEAT_POLL))
+	if (!(q->limits.features & BLK_FEAT_POLL) &&
+	    (bio->bi_opf & REQ_POLLED)) {
 		bio_clear_polled(bio);
+		goto not_supported;
+	}

 	switch (bio_op(bio)) {
 	case REQ_OP_READ:

From 72d04bdcf3f7d7e07d82f9757946f68802a7270a Mon Sep 17 00:00:00 2001
From: Yang Yang
Date: Tue, 16 Jul 2024 16:26:27 +0800
Subject: [PATCH 22/23] sbitmap: fix io hung due to race on sbitmap_word::cleared

Configuration for sbq: depth=64, wake_batch=6, shift=6, map_nr=1

1. There are 64 requests in progress:
   map->word = 0xFFFFFFFFFFFFFFFF

2. After all the 64 requests complete, and no more requests come:
   map->word = 0xFFFFFFFFFFFFFFFF, map->cleared = 0xFFFFFFFFFFFFFFFF

3. Now two tasks try to allocate requests:

   T1:                                       T2:
   __blk_mq_get_tag                          .
   __sbitmap_queue_get                       .
   sbitmap_get                               .
   sbitmap_find_bit                          .
   sbitmap_find_bit_in_word                  .
   __sbitmap_get_word -> nr=-1               __blk_mq_get_tag
   sbitmap_deferred_clear                    __sbitmap_queue_get
   /* map->cleared=0xFFFFFFFFFFFFFFFF */     sbitmap_find_bit
   if (!READ_ONCE(map->cleared))             sbitmap_find_bit_in_word
     return false;                           __sbitmap_get_word -> nr=-1
   mask = xchg(&map->cleared, 0)             sbitmap_deferred_clear
   atomic_long_andnot()                      /* map->cleared=0 */
                                             if (!(map->cleared))
                                               return false;
                                             /*
                                              * map->cleared is cleared by T1
                                              * T2 fail to acquire the tag
                                              */

4. T2 is the sole tag waiter.
When T1 puts the tag, T2 cannot be woken up because wake_batch is set
to 6. If no more requests come, T2 will wait here indefinitely.

This patch achieves two purposes:
1. The check of ->cleared and the updates of both ->cleared and ->word
   need to be done atomically; using a spinlock is the simplest
   solution.
2. Add an extra check in sbitmap_deferred_clear() to identify whether
   ->word has free bits.

Fixes: ea86ea2cdced ("sbitmap: ammortize cost of clearing bits")
Signed-off-by: Yang Yang
Reviewed-by: Ming Lei
Reviewed-by: Bart Van Assche
Link: https://lore.kernel.org/r/20240716082644.659566-1-yang.yang@vivo.com
Signed-off-by: Jens Axboe
---
 include/linux/sbitmap.h |  5 +++++
 lib/sbitmap.c           | 36 +++++++++++++++++++++++++++-------
 2 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index d662cf136021..c09cdcc99471 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -36,6 +36,11 @@ struct sbitmap_word {
 	 * @cleared: word holding cleared bits
 	 */
 	unsigned long cleared ____cacheline_aligned_in_smp;
+
+	/**
+	 * @swap_lock: serializes simultaneous updates of ->word and ->cleared
+	 */
+	spinlock_t swap_lock;
 } ____cacheline_aligned_in_smp;

 /**

diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 1e453f825c05..5e2e93307f0d 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb,
 /*
  * See if we have deferred clears that we can batch move
  */
-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
+		unsigned int depth, unsigned int alloc_hint, bool wrap)
 {
-	unsigned long mask;
+	unsigned long mask, word_mask;

-	if (!READ_ONCE(map->cleared))
-		return false;
+	guard(spinlock_irqsave)(&map->swap_lock);
+
+	if (!map->cleared) {
+		if (depth == 0)
+			return false;
+
+		word_mask = (~0UL) >> (BITS_PER_LONG - depth);
+		/*
+		 * The current behavior is to always retry after moving
+		 * ->cleared to word, and we change it to retry in case
+		 * of any free bits. To avoid an infinite loop, we need
+		 * to take wrap & alloc_hint into account, otherwise a
+		 * soft lockup may occur.
+		 */
+		if (!wrap && alloc_hint)
+			word_mask &= ~((1UL << alloc_hint) - 1);
+
+		return (READ_ONCE(map->word) & word_mask) != word_mask;
+	}

 	/*
	 * First get a stable cleared mask, setting the old mask to 0.
@@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, bool alloc_hint) { unsigned int bits_per_word; + int i; if (shift < 0) shift = sbitmap_calculate_shift(depth); @@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, return -ENOMEM; } + for (i = 0; i < sb->map_nr; i++) + spin_lock_init(&sb->map[i].swap_lock); + return 0; } EXPORT_SYMBOL_GPL(sbitmap_init_node); @@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) unsigned int i; for (i = 0; i < sb->map_nr; i++) - sbitmap_deferred_clear(&sb->map[i]); + sbitmap_deferred_clear(&sb->map[i], 0, 0, 0); sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); @@ -179,7 +201,7 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map, alloc_hint, wrap); if (nr != -1) break; - if (!sbitmap_deferred_clear(map)) + if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap)) break; } while (1); @@ -496,7 +518,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, unsigned int map_depth = __map_depth(sb, index); unsigned long val; - sbitmap_deferred_clear(map); + sbitmap_deferred_clear(map, 0, 0, 0); val = READ_ONCE(map->word); if (val == (1UL << (map_depth - 1)) - 1) goto next; From 89ed6c9ac69ec398ccb648f5f675b43e8ca679ca Mon Sep 17 00:00:00 2001 From: Xiu Jianfeng Date: Tue, 16 Jul 2024 13:30:58 +0000 Subject: [PATCH 23/23] blk-cgroup: move congestion_count to struct blkcg The congestion_count was introduced into the struct cgroup by commit d09d8df3a294 ("blkcg: add generic throttling mechanism"), but since it is closely related to the blkio subsys, it is not appropriate to put it in the struct cgroup, so let's move it to struct blkcg. There should be no functional changes because blkcg is per cgroup. Signed-off-by: Xiu Jianfeng Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20240716133058.3491350-1-xiujianfeng@huawei.com Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 7 ++++--- block/blk-cgroup.h | 10 ++++++---- include/linux/cgroup-defs.h | 3 --- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 37e6cc91d576..69e70964398c 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -2182,12 +2182,13 @@ void blk_cgroup_bio_start(struct bio *bio) bool blk_cgroup_congested(void) { - struct cgroup_subsys_state *css; + struct blkcg *blkcg; bool ret = false; rcu_read_lock(); - for (css = blkcg_css(); css; css = css->parent) { - if (atomic_read(&css->cgroup->congestion_count)) { + for (blkcg = css_to_blkcg(blkcg_css()); blkcg; + blkcg = blkcg_parent(blkcg)) { + if (atomic_read(&blkcg->congestion_count)) { ret = true; break; } diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index bd472a30bc61..864fad4a850b 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -95,6 +95,8 @@ struct blkcg { struct cgroup_subsys_state css; spinlock_t lock; refcount_t online_pin; + /* If there is block congestion on this cgroup. 
*/ + atomic_t congestion_count; struct radix_tree_root blkg_tree; struct blkcg_gq __rcu *blkg_hint; @@ -374,7 +376,7 @@ static inline void blkcg_use_delay(struct blkcg_gq *blkg) if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) return; if (atomic_add_return(1, &blkg->use_delay) == 1) - atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); + atomic_inc(&blkg->blkcg->congestion_count); } static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) @@ -399,7 +401,7 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) if (old == 0) return 0; if (old == 1) - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + atomic_dec(&blkg->blkcg->congestion_count); return 1; } @@ -418,7 +420,7 @@ static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) /* We only want 1 person setting the congestion count for this blkg. */ if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1)) - atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); + atomic_inc(&blkg->blkcg->congestion_count); atomic64_set(&blkg->delay_nsec, delay); } @@ -435,7 +437,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) /* We only want 1 person clearing the congestion count for this blkg. */ if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0)) - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + atomic_dec(&blkg->blkcg->congestion_count); } /** diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index ea48c861cd36..ec82c4b7ed4d 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -534,9 +534,6 @@ struct cgroup { /* used to store eBPF programs */ struct cgroup_bpf bpf; - /* If there is block congestion on this cgroup. */ - atomic_t congestion_count; - /* Used to store internal freezer state */ struct cgroup_freezer_state freezer;