From 3116a23bb30272d74ea81baf5d0ee23f602dd15b Mon Sep 17 00:00:00 2001
From: Dmitry Monakhov
Date: Wed, 10 May 2017 19:20:44 +0400
Subject: [PATCH 01/15] bio-integrity: Do not allocate integrity context for bio w/o data

If a bio has no data, such as the ones issued by blkdev_issue_flush(),
then we have nothing to protect. This patch prevents a BUG_ON like the
following:

kfree_debugcheck: out of range ptr ac1fa1d106742a5ah
kernel BUG at mm/slab.c:2773!
invalid opcode: 0000 [#1] SMP
Modules linked in: bcache
CPU: 0 PID: 4428 Comm: xfs_io Tainted: G W 4.11.0-rc4-ext4-00041-g2ef0043-dirty #43
Hardware name: Virtuozzo KVM, BIOS seabios-1.7.5-11.vz7.4 04/01/2014
task: ffff880137786440 task.stack: ffffc90000ba8000
RIP: 0010:kfree_debugcheck+0x25/0x2a
RSP: 0018:ffffc90000babde0 EFLAGS: 00010082
RAX: 0000000000000034 RBX: ac1fa1d106742a5a RCX: 0000000000000007
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88013f3ccb40
RBP: ffffc90000babde8 R08: 0000000000000000 R09: 0000000000000000
R10: 00000000fcb76420 R11: 00000000725172ed R12: 0000000000000282
R13: ffffffff8150e766 R14: ffff88013a145e00 R15: 0000000000000001
FS:  00007fb09384bf40(0000) GS:ffff88013f200000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fd0172f9e40 CR3: 0000000137fa9000 CR4: 00000000000006f0
Call Trace:
 kfree+0xc8/0x1b3
 bio_integrity_free+0xc3/0x16b
 bio_free+0x25/0x66
 bio_put+0x14/0x26
 blkdev_issue_flush+0x7a/0x85
 blkdev_fsync+0x35/0x42
 vfs_fsync_range+0x8e/0x9f
 vfs_fsync+0x1c/0x1e
 do_fsync+0x31/0x4a
 SyS_fsync+0x10/0x14
 entry_SYSCALL_64_fastpath+0x1f/0xc2

Reviewed-by: Christoph Hellwig
Reviewed-by: Hannes Reinecke
Reviewed-by: Martin K. Petersen
Signed-off-by: Dmitry Monakhov
Signed-off-by: Jens Axboe
---
 block/bio-integrity.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5384713d48bc..b5009a896a7f 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
 	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
 		return false;
 
+	if (!bio_sectors(bio))
+		return false;
+
 	/* Already protected?
*/ if (bio_integrity(bio)) return false; From dad7a3be4960e5545882a0cd8d7613af22874314 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 6 Jun 2017 23:21:59 +0800 Subject: [PATCH 02/15] blk-mq: pass correct hctx to blk_mq_try_issue_directly When direct issue is done on request picked up from plug list, the hctx need to be updated with the actual hw queue, otherwise wrong hctx is used and may hurt performance, especially when wrong SRCU readlock is acquired/released Reported-by: Bart Van Assche Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 1bcccedcc74f..4ddfa019face 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1619,9 +1619,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_mq_put_ctx(data.ctx); - if (same_queue_rq) + if (same_queue_rq) { + data.hctx = blk_mq_map_queue(q, + same_queue_rq->mq_ctx->cpu); blk_mq_try_issue_directly(data.hctx, same_queue_rq, &cookie); + } } else if (q->nr_hw_queues > 1 && is_sync) { blk_mq_put_ctx(data.ctx); blk_mq_bio_to_request(rq, bio); From d964f04a8fde84d978eff0d96561faa6e8de24de Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 6 Jun 2017 23:22:00 +0800 Subject: [PATCH 03/15] blk-mq: fix direct issue If queue is stopped, we shouldn't dispatch request into driver and hardware, unfortunately the check is removed in bd166ef183c2(blk-mq-sched: add framework for MQ capable IO schedulers). This patch fixes the issue by moving the check back into __blk_mq_try_issue_directly(). This patch fixes request use-after-free[1][2] during canceling requets of NVMe in nvme_dev_disable(), which can be triggered easily during NVMe reset & remove test. [1] oops kernel log when CONFIG_BLK_DEV_INTEGRITY is on [ 103.412969] BUG: unable to handle kernel NULL pointer dereference at 000000000000000a [ 103.412980] IP: bio_integrity_advance+0x48/0xf0 [ 103.412981] PGD 275a88067 [ 103.412981] P4D 275a88067 [ 103.412982] PUD 276c43067 [ 103.412983] PMD 0 [ 103.412984] [ 103.412986] Oops: 0000 [#1] SMP [ 103.412989] Modules linked in: vfat fat intel_rapl sb_edac x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc aesni_intel crypto_simd cryptd ipmi_ssif iTCO_wdt iTCO_vendor_support mxm_wmi glue_helper dcdbas ipmi_si mei_me pcspkr mei sg ipmi_devintf lpc_ich ipmi_msghandler shpchp acpi_power_meter wmi nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables xfs libcrc32c sd_mod mgag200 i2c_algo_bit drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm crc32c_intel nvme ahci nvme_core libahci libata tg3 i2c_core megaraid_sas ptp pps_core dm_mirror dm_region_hash dm_log dm_mod [ 103.413035] CPU: 0 PID: 102 Comm: kworker/0:2 Not tainted 4.11.0+ #1 [ 103.413036] Hardware name: Dell Inc. 
PowerEdge R730xd/072T6D, BIOS 2.2.5 09/06/2016 [ 103.413041] Workqueue: events nvme_remove_dead_ctrl_work [nvme] [ 103.413043] task: ffff9cc8775c8000 task.stack: ffffc033c252c000 [ 103.413045] RIP: 0010:bio_integrity_advance+0x48/0xf0 [ 103.413046] RSP: 0018:ffffc033c252fc10 EFLAGS: 00010202 [ 103.413048] RAX: 0000000000000000 RBX: ffff9cc8720a8cc0 RCX: ffff9cca72958240 [ 103.413049] RDX: ffff9cca72958000 RSI: 0000000000000008 RDI: ffff9cc872537f00 [ 103.413049] RBP: ffffc033c252fc28 R08: 0000000000000000 R09: ffffffffb963a0d5 [ 103.413050] R10: 000000000000063e R11: 0000000000000000 R12: ffff9cc8720a8d18 [ 103.413051] R13: 0000000000001000 R14: ffff9cc872682e00 R15: 00000000fffffffb [ 103.413053] FS: 0000000000000000(0000) GS:ffff9cc877c00000(0000) knlGS:0000000000000000 [ 103.413054] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 103.413055] CR2: 000000000000000a CR3: 0000000276c41000 CR4: 00000000001406f0 [ 103.413056] Call Trace: [ 103.413063] bio_advance+0x2a/0xe0 [ 103.413067] blk_update_request+0x76/0x330 [ 103.413072] blk_mq_end_request+0x1a/0x70 [ 103.413074] blk_mq_dispatch_rq_list+0x370/0x410 [ 103.413076] ? blk_mq_flush_busy_ctxs+0x94/0xe0 [ 103.413080] blk_mq_sched_dispatch_requests+0x173/0x1a0 [ 103.413083] __blk_mq_run_hw_queue+0x8e/0xa0 [ 103.413085] __blk_mq_delay_run_hw_queue+0x9d/0xa0 [ 103.413088] blk_mq_start_hw_queue+0x17/0x20 [ 103.413090] blk_mq_start_hw_queues+0x32/0x50 [ 103.413095] nvme_kill_queues+0x54/0x80 [nvme_core] [ 103.413097] nvme_remove_dead_ctrl_work+0x1f/0x40 [nvme] [ 103.413103] process_one_work+0x149/0x360 [ 103.413105] worker_thread+0x4d/0x3c0 [ 103.413109] kthread+0x109/0x140 [ 103.413111] ? rescuer_thread+0x380/0x380 [ 103.413113] ? kthread_park+0x60/0x60 [ 103.413120] ret_from_fork+0x2c/0x40 [ 103.413121] Code: 08 4c 8b 63 50 48 8b 80 80 00 00 00 48 8b 90 d0 03 00 00 31 c0 48 83 ba 40 02 00 00 00 48 8d 8a 40 02 00 00 48 0f 45 c1 c1 ee 09 <0f> b6 48 0a 0f b6 40 09 41 89 f5 83 e9 09 41 d3 ed 44 0f af e8 [ 103.413145] RIP: bio_integrity_advance+0x48/0xf0 RSP: ffffc033c252fc10 [ 103.413146] CR2: 000000000000000a [ 103.413157] ---[ end trace cd6875d16eb5a11e ]--- [ 103.455368] Kernel panic - not syncing: Fatal exception [ 103.459826] Kernel Offset: 0x37600000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff) [ 103.850916] ---[ end Kernel panic - not syncing: Fatal exception [ 103.857637] sched: Unexpected reschedule of offline CPU#1! [ 103.863762] ------------[ cut here ]------------ [2] kernel hang in blk_mq_freeze_queue_wait() when CONFIG_BLK_DEV_INTEGRITY is off [ 247.129825] INFO: task nvme-test:1772 blocked for more than 120 seconds. [ 247.137311] Not tainted 4.12.0-rc2.upstream+ #4 [ 247.142954] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 247.151704] Call Trace: [ 247.154445] __schedule+0x28a/0x880 [ 247.158341] schedule+0x36/0x80 [ 247.161850] blk_mq_freeze_queue_wait+0x4b/0xb0 [ 247.166913] ? 
remove_wait_queue+0x60/0x60 [ 247.171485] blk_freeze_queue+0x1a/0x20 [ 247.175770] blk_cleanup_queue+0x7f/0x140 [ 247.180252] nvme_ns_remove+0xa3/0xb0 [nvme_core] [ 247.185503] nvme_remove_namespaces+0x32/0x50 [nvme_core] [ 247.191532] nvme_uninit_ctrl+0x2d/0xa0 [nvme_core] [ 247.196977] nvme_remove+0x70/0x110 [nvme] [ 247.201545] pci_device_remove+0x39/0xc0 [ 247.205927] device_release_driver_internal+0x141/0x200 [ 247.211761] device_release_driver+0x12/0x20 [ 247.216531] pci_stop_bus_device+0x8c/0xa0 [ 247.221104] pci_stop_and_remove_bus_device_locked+0x1a/0x30 [ 247.227420] remove_store+0x7c/0x90 [ 247.231320] dev_attr_store+0x18/0x30 [ 247.235409] sysfs_kf_write+0x3a/0x50 [ 247.239497] kernfs_fop_write+0xff/0x180 [ 247.243867] __vfs_write+0x37/0x160 [ 247.247757] ? selinux_file_permission+0xe5/0x120 [ 247.253011] ? security_file_permission+0x3b/0xc0 [ 247.258260] vfs_write+0xb2/0x1b0 [ 247.261964] ? syscall_trace_enter+0x1d0/0x2b0 [ 247.266924] SyS_write+0x55/0xc0 [ 247.270540] do_syscall_64+0x67/0x150 [ 247.274636] entry_SYSCALL64_slow_path+0x25/0x25 [ 247.279794] RIP: 0033:0x7f5c96740840 [ 247.283785] RSP: 002b:00007ffd00e87ee8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 247.292238] RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007f5c96740840 [ 247.300194] RDX: 0000000000000002 RSI: 00007f5c97060000 RDI: 0000000000000001 [ 247.308159] RBP: 00007f5c97060000 R08: 000000000000000a R09: 00007f5c97059740 [ 247.316123] R10: 0000000000000001 R11: 0000000000000246 R12: 00007f5c96a14400 [ 247.324087] R13: 0000000000000002 R14: 0000000000000001 R15: 0000000000000000 [ 370.016340] INFO: task nvme-test:1772 blocked for more than 120 seconds. Fixes: 12d70958a2e8(blk-mq: don't fail allocating driver tag for stopped hw queue) Cc: stable@vger.kernel.org Signed-off-by: Ming Lei Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-mq.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 4ddfa019face..bb66c96850b1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1461,22 +1461,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); } -static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, - bool may_sleep) +static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, + struct request *rq, + blk_qc_t *cookie, bool may_sleep) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { .rq = rq, .last = true, }; - struct blk_mq_hw_ctx *hctx; blk_qc_t new_cookie; int ret; + bool run_queue = true; + + if (blk_mq_hctx_stopped(hctx)) { + run_queue = false; + goto insert; + } if (q->elevator) goto insert; - if (!blk_mq_get_driver_tag(rq, &hctx, false)) + if (!blk_mq_get_driver_tag(rq, NULL, false)) goto insert; new_cookie = request_to_qc_t(hctx, rq); @@ -1500,7 +1506,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, __blk_mq_requeue_request(rq); insert: - blk_mq_sched_insert_request(rq, false, true, false, may_sleep); + blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep); } static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, @@ -1508,7 +1514,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, { if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { rcu_read_lock(); - __blk_mq_try_issue_directly(rq, cookie, false); + __blk_mq_try_issue_directly(hctx, rq, cookie, false); rcu_read_unlock(); } else { unsigned int 
srcu_idx; @@ -1516,7 +1522,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, might_sleep(); srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu); - __blk_mq_try_issue_directly(rq, cookie, true); + __blk_mq_try_issue_directly(hctx, rq, cookie, true); srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx); } } From 9bd2bbc01d17ddd567cc0f81f77fe1163e497462 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 2 Jun 2017 20:35:51 -0700 Subject: [PATCH 04/15] elevator: fix truncation of icq_cache_name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc 7.1 reports the following warning: block/elevator.c: In function ‘elv_register’: block/elevator.c:898:5: warning: ‘snprintf’ output may be truncated before the last format character [-Wformat-truncation=] "%s_io_cq", e->elevator_name); ^~~~~~~~~~ block/elevator.c:897:3: note: ‘snprintf’ output between 7 and 22 bytes into a destination of size 21 snprintf(e->icq_cache_name, sizeof(e->icq_cache_name), ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ "%s_io_cq", e->elevator_name); ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The bug is that the name of the icq_cache is 6 characters longer than the elevator name, but only ELV_NAME_MAX + 5 characters were reserved for it --- so in the case of a maximum-length elevator name, the 'q' character in "_io_cq" would be truncated by snprintf(). Fix it by reserving ELV_NAME_MAX + 6 characters instead. Signed-off-by: Eric Biggers Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/elevator.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 9ec5e22846e0..0e306c5a86d6 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -153,7 +153,7 @@ struct elevator_type #endif /* managed by elevator core */ - char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ + char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ struct list_head list; }; From 82654b6b8ef8b93ee87a97fc562f87f081fc2f91 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 2 Jun 2017 16:32:08 +0800 Subject: [PATCH 05/15] nvme: fix hang in remove path We need to start admin queues too in nvme_kill_queues() for avoiding hang in remove path[1]. This patch is very similar with 806f026f9b901eaf(nvme: use blk_mq_start_hw_queues() in nvme_kill_queues()). 
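For illustration only (this is not driver code), the stuck-request pattern is easy to model in plain userspace C. The sketch below uses hypothetical names (toy_queue, submit_sync, kill_queues) and only mimics the idea: a synchronous submitter waits while a queue is stopped, and can fail fast only once the queues are forcibly restarted in a dying state.

/*
 * Minimal userspace model of the hang fixed here. Hypothetical names,
 * not kernel code. Build with: cc model.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct toy_queue {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool stopped;	/* queue not accepting work, submitters wait */
	bool dying;	/* controller dead, fail requests immediately */
};

static struct toy_queue admin_q = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.stopped = true,	/* what disabling the controller leaves behind */
};

/* models a synchronous admin command (e.g. a set-features call) */
static int submit_sync(struct toy_queue *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	while (q->stopped)	/* this is where the hang happens before the fix */
		pthread_cond_wait(&q->cond, &q->lock);
	if (q->dying)
		ret = -1;	/* request fails fast instead of hanging */
	pthread_mutex_unlock(&q->lock);
	return ret;
}

/* models the kill-queues step: force-start queues so waiters fail fast */
static void kill_queues(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->dying = true;
	q->stopped = false;	/* the blk_mq_start_hw_queues() step */
	pthread_cond_broadcast(&q->cond);
	pthread_mutex_unlock(&q->lock);
}

static void *remover(void *arg)
{
	sleep(1);		/* removal path runs later */
	kill_queues(&admin_q);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, remover, NULL);
	printf("submit_sync() returned %d\n", submit_sync(&admin_q));
	pthread_join(t, NULL);
	return 0;
}

The stack trace below shows the same pattern in the real driver, with blk_execute_rq() as the blocked synchronous submitter.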
[1] hang stack trace [] blk_execute_rq+0x56/0x80 [] __nvme_submit_sync_cmd+0x89/0xf0 [] nvme_set_features+0x5e/0x90 [] nvme_configure_apst+0x166/0x200 [] nvme_set_latency_tolerance+0x35/0x50 [] apply_constraint+0xb1/0xc0 [] dev_pm_qos_constraints_destroy+0xf4/0x1f0 [] dpm_sysfs_remove+0x2a/0x60 [] device_del+0x101/0x320 [] device_unregister+0x1a/0x60 [] device_destroy+0x3c/0x50 [] nvme_uninit_ctrl+0x45/0xa0 [] nvme_remove+0x78/0x110 [] pci_device_remove+0x39/0xb0 [] device_release_driver_internal+0x155/0x210 [] device_release_driver+0x12/0x20 [] nvme_remove_dead_ctrl_work+0x6b/0x70 [] process_one_work+0x18c/0x3a0 [] worker_thread+0x4e/0x3b0 [] kthread+0x109/0x140 [] ret_from_fork+0x2c/0x40 [] 0xffffffffffffffff Fixes: c5552fde102fc("nvme: Enable autonomous power state transitions") Reported-by: Rakesh Pandit Tested-by: Rakesh Pandit Reviewed-by: Sagi Grimberg Signed-off-by: Ming Lei Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index a60926410438..0f9cc0c55e15 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2438,6 +2438,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) struct nvme_ns *ns; mutex_lock(&ctrl->namespaces_mutex); + + /* Forcibly start all queues to avoid having stuck requests */ + blk_mq_start_hw_queues(ctrl->admin_q); + list_for_each_entry(ns, &ctrl->namespaces, list) { /* * Revalidating a dead namespace sets capacity to 0. This will From 82b057caefaff2a891f821a617d939f46e03e844 Mon Sep 17 00:00:00 2001 From: Rakesh Pandit Date: Mon, 5 Jun 2017 14:43:11 +0300 Subject: [PATCH 06/15] nvme-pci: fix multiple ctrl removal scheduling Commit c5f6ce97c1210 tries to address multiple resets but fails as work_busy doesn't involve any synchronization and can fail. This is reproducible easily as can be seen by WARNING below which is triggered with line: WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING) Allowing multiple resets can result in multiple controller removal as well if different conditions inside nvme_reset_work fail and which might deadlock on device_release_driver. [ 480.327007] WARNING: CPU: 3 PID: 150 at drivers/nvme/host/pci.c:1900 nvme_reset_work+0x36c/0xec0 [ 480.327008] Modules linked in: rfcomm fuse nf_conntrack_netbios_ns nf_conntrack_broadcast... [ 480.327044] btusb videobuf2_core ghash_clmulni_intel snd_hwdep cfg80211 acer_wmi hci_uart.. 
[ 480.327065] CPU: 3 PID: 150 Comm: kworker/u16:2 Not tainted 4.12.0-rc1+ #13 [ 480.327065] Hardware name: Acer Predator G9-591/Mustang_SLS, BIOS V1.10 03/03/2016 [ 480.327066] Workqueue: nvme nvme_reset_work [ 480.327067] task: ffff880498ad8000 task.stack: ffffc90002218000 [ 480.327068] RIP: 0010:nvme_reset_work+0x36c/0xec0 [ 480.327069] RSP: 0018:ffffc9000221bdb8 EFLAGS: 00010246 [ 480.327070] RAX: 0000000000460000 RBX: ffff880498a98128 RCX: dead000000000200 [ 480.327070] RDX: 0000000000000001 RSI: ffff8804b1028020 RDI: ffff880498a98128 [ 480.327071] RBP: ffffc9000221be50 R08: 0000000000000000 R09: 0000000000000000 [ 480.327071] R10: ffffc90001963ce8 R11: 000000000000020d R12: ffff880498a98000 [ 480.327072] R13: ffff880498a53500 R14: ffff880498a98130 R15: ffff880498a98128 [ 480.327072] FS: 0000000000000000(0000) GS:ffff8804c1cc0000(0000) knlGS:0000000000000000 [ 480.327073] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 480.327074] CR2: 00007ffcf3c37f78 CR3: 0000000001e09000 CR4: 00000000003406e0 [ 480.327074] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 480.327075] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 480.327075] Call Trace: [ 480.327079] ? __switch_to+0x227/0x400 [ 480.327081] process_one_work+0x18c/0x3a0 [ 480.327082] worker_thread+0x4e/0x3b0 [ 480.327084] kthread+0x109/0x140 [ 480.327085] ? process_one_work+0x3a0/0x3a0 [ 480.327087] ? kthread_park+0x60/0x60 [ 480.327102] ret_from_fork+0x2c/0x40 [ 480.327103] Code: e8 5a dc ff ff 85 c0 41 89 c1 0f..... This patch addresses the problem by using state of controller to decide whether reset should be queued or not as state change is synchronizated using controller spinlock. Also cancel_work_sync is used to make sure remove cancels the reset_work and waits for it to finish. This patch also changes return value from -ENODEV to more appropriate -EBUSY if nvme_reset fails to change state. Fixes: c5f6ce97c1210 ("nvme: don't schedule multiple resets") Signed-off-by: Rakesh Pandit Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/host/pci.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d52701df7245..951042a375d6 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1367,7 +1367,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); /* If there is a reset ongoing, we shouldn't reset again. 
 */
-	if (work_busy(&dev->reset_work))
+	if (dev->ctrl.state == NVME_CTRL_RESETTING)
 		return false;
 
 	/* We shouldn't reset unless the controller is on fatal error state
@@ -1903,7 +1903,7 @@ static void nvme_reset_work(struct work_struct *work)
 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
 	int result = -ENODEV;
 
-	if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
 		goto out;
 
 	/*
@@ -1913,9 +1913,6 @@ static void nvme_reset_work(struct work_struct *work)
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-		goto out;
-
 	result = nvme_pci_enable(dev);
 	if (result)
 		goto out;
@@ -2009,8 +2006,8 @@ static int nvme_reset(struct nvme_dev *dev)
 {
 	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
 		return -ENODEV;
-	if (work_busy(&dev->reset_work))
-		return -ENODEV;
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+		return -EBUSY;
 	if (!queue_work(nvme_workq, &dev->reset_work))
 		return -EBUSY;
 	return 0;
@@ -2136,6 +2133,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release_pools;
 
+	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
 	queue_work(nvme_workq, &dev->reset_work);
@@ -2179,6 +2177,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
+	cancel_work_sync(&dev->reset_work);
 	pci_set_drvdata(pdev, NULL);
 
 	if (!pci_device_is_present(pdev)) {

From e818a5b487fea20494b0e48548c1085634abdc0d Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Mon, 5 Jun 2017 20:35:56 +0300
Subject: [PATCH 07/15] nvme-rdma: fast fail incoming requests while we reconnect

When we encounter a transport/controller error, error recovery kicks in,
which performs:
1. stops io/admin queues
2. moves transport queues out of LIVE state
3. fast fail pending io
4. schedule periodic reconnects.

But we also need to fast fail incoming IO that enters after we already
scheduled.
Given that our queue is not LIVE anymore, simply restart the request queues to fail in .queue_rq Reported-by: Alex Turin Reported-by: shahar.salzman Signed-off-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Cc: stable@vger.kernel.org --- drivers/nvme/host/rdma.c | 44 ++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 28bd255c144d..24397d306d53 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) if (ret) goto requeue; - blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); - ret = nvmf_connect_admin_queue(&ctrl->ctrl); if (ret) - goto stop_admin_q; + goto requeue; set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); if (ret) - goto stop_admin_q; + goto requeue; nvme_start_keep_alive(&ctrl->ctrl); if (ctrl->queue_count > 1) { ret = nvme_rdma_init_io_queues(ctrl); if (ret) - goto stop_admin_q; + goto requeue; ret = nvme_rdma_connect_io_queues(ctrl); if (ret) - goto stop_admin_q; + goto requeue; } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); @@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) ctrl->ctrl.opts->nr_reconnects = 0; if (ctrl->queue_count > 1) { - nvme_start_queues(&ctrl->ctrl); nvme_queue_scan(&ctrl->ctrl); nvme_queue_async_events(&ctrl->ctrl); } @@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) return; -stop_admin_q: - blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); requeue: dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", ctrl->ctrl.opts->nr_reconnects); @@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, &ctrl->ctrl); + /* + * queues are not a live anymore, so restart the queues to fail fast + * new IO + */ + blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); + nvme_start_queues(&ctrl->ctrl); + nvme_rdma_reconnect_or_remove(ctrl); } @@ -1433,7 +1435,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved) /* * We cannot accept any other command until the Connect command has completed. */ -static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, +static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq) { if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { @@ -1441,11 +1443,22 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, if (!blk_rq_is_passthrough(rq) || cmd->common.opcode != nvme_fabrics_command || - cmd->fabrics.fctype != nvme_fabrics_type_connect) - return false; + cmd->fabrics.fctype != nvme_fabrics_type_connect) { + /* + * reconnecting state means transport disruption, which + * can take a long time and even might fail permanently, + * so we can't let incoming I/O be requeued forever. + * fail it fast to allow upper layers a chance to + * failover. 
+ */ + if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING) + return -EIO; + else + return -EAGAIN; + } } - return true; + return 0; } static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -1463,8 +1476,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, WARN_ON_ONCE(rq->tag < 0); - if (!nvme_rdma_queue_is_ready(queue, rq)) - return BLK_MQ_RQ_QUEUE_BUSY; + ret = nvme_rdma_queue_is_ready(queue, rq); + if (unlikely(ret)) + goto err; dev = queue->device->dev; ib_dma_sync_single_for_cpu(dev, sqe->dma, From f874d5d079ec35158fa1a1509554c9d641bd5770 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 22:54:21 -0700 Subject: [PATCH 08/15] nvme-fc: on lldd/transport io error, terminate association Per FC-NVME, when lldd or transport detects an i/o error, the connection must be terminated, which in turn requires the association to be termianted. Currently the transport simply creates a nvme completion status of transport error and returns the io. The FC-NVME spec makes the mandate as initiator and host, depending on the error, can get out of sync on outstanding io counts (sqhd/sqtail). Implement the association teardown on lldd or transport detected errors. Signed-off-by: James Smart Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg --- drivers/nvme/host/fc.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 5b14cbefb724..2edae54688e8 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1139,6 +1139,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) /* *********************** NVME Ctrl Routines **************************** */ static void __nvme_fc_final_op_cleanup(struct request *rq); +static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); static int nvme_fc_reinit_request(void *data, struct request *rq) @@ -1265,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) struct nvme_command *sqe = &op->cmd_iu.sqe; __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); union nvme_result result; - bool complete_rq; + bool complete_rq, terminate_assoc = true; /* * WARNING: @@ -1294,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) * fabricate a CQE, the following fields will not be set as they * are not referenced: * cqe.sqid, cqe.sqhd, cqe.command_id + * + * Failure or error of an individual i/o, in a transport + * detected fashion unrelated to the nvme completion status, + * potentially cause the initiator and target sides to get out + * of sync on SQ head/tail (aka outstanding io count allowed). + * Per FC-NVME spec, failure of an individual command requires + * the connection to be terminated, which in turn requires the + * association to be terminated. 
*/ fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, @@ -1359,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) goto done; } + terminate_assoc = false; + done: if (op->flags & FCOP_FLAGS_AEN) { nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); @@ -1366,7 +1377,7 @@ done: atomic_set(&op->state, FCPOP_STATE_IDLE); op->flags = FCOP_FLAGS_AEN; /* clear other flags */ nvme_fc_ctrl_put(ctrl); - return; + goto check_error; } complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); @@ -1379,6 +1390,10 @@ done: nvme_end_request(rq, status, result); } else __nvme_fc_final_op_cleanup(rq); + +check_error: + if (terminate_assoc) + nvme_fc_error_recovery(ctrl, "transport detected io error"); } static int From 24b7f0592f738a1127c72dbf5b72a83997dd6997 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 5 Jun 2017 15:03:42 -0700 Subject: [PATCH 09/15] nvme-fc: fix missing put reference on controller create failure The failure case, of a create controller request, called nvme_uninit_ctrl() but didn't do a put to allow the nvme controller to be deleted. Signed-off-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/nvme/host/fc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 2edae54688e8..92964cef0f4b 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2806,6 +2806,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ctrl->ctrl.opts = NULL; /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); /* as we're past the point where we transition to the ref * counting teardown path, if we return a bad pointer here, From da87591bea92204fcb921bac927666eb7141908e Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Wed, 7 Jun 2017 15:25:42 +0800 Subject: [PATCH 10/15] nvme: only consider exit latency when choosing useful non-op power states When a NVMe is in non-op states, the latency is exlat. The latency will be enlat + exlat only when the NVMe tries to transit from operational state right atfer it begins to transit to non-operational state, which should be a rare case. Therefore, as Andy Lutomirski suggests, use exlat only when deciding power states to trainsit to. Signed-off-by: Kai-Heng Feng Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0f9cc0c55e15..c07d8d4e18c9 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1342,7 +1342,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) * transitioning between power states. Therefore, when running * in any given state, we will enter the next lower-power * non-operational state after waiting 50 * (enlat + exlat) - * microseconds, as long as that state's total latency is under + * microseconds, as long as that state's exit latency is under * the requested maximum latency. * * We will not autonomously enter any non-operational state for @@ -1387,7 +1387,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) * lowest-power state, not the number of states. 
*/ for (state = (int)ctrl->npss; state >= 0; state--) { - u64 total_latency_us, transition_ms; + u64 total_latency_us, exit_latency_us, transition_ms; if (target) table->entries[state] = target; @@ -1408,12 +1408,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) NVME_PS_FLAGS_NON_OP_STATE)) continue; - total_latency_us = - (u64)le32_to_cpu(ctrl->psd[state].entry_lat) + - + le32_to_cpu(ctrl->psd[state].exit_lat); - if (total_latency_us > ctrl->ps_max_latency_us) + exit_latency_us = + (u64)le32_to_cpu(ctrl->psd[state].exit_lat); + if (exit_latency_us > ctrl->ps_max_latency_us) continue; + total_latency_us = + exit_latency_us + + le32_to_cpu(ctrl->psd[state].entry_lat); + /* * This state is good. Use it as the APST idle * target for higher power states. From 9947d6a09cd71937dade2fc14640e4843ae19802 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Wed, 7 Jun 2017 15:25:43 +0800 Subject: [PATCH 11/15] nvme: relax APST default max latency to 100ms Christoph Hellwig suggests we should to make APST work out of the box. Hence relax the the default max latency to make them able to enter deepest power state on default. Here are id-ctrl excerpts from two high latency NVMes: vid : 0x14a4 ssvid : 0x1b4b mn : CX2-GB1024-Q11 NVMe LITEON 1024GB ps 3 : mp:0.1000W non-operational enlat:5000 exlat:5000 rrt:3 rrl:3 rwt:3 rwl:3 idle_power:- active_power:- ps 4 : mp:0.0100W non-operational enlat:50000 exlat:100000 rrt:4 rrl:4 rwt:4 rwl:4 idle_power:- active_power:- vid : 0x15b7 ssvid : 0x1b4b mn : A400 NVMe SanDisk 512GB ps 3 : mp:0.0500W non-operational enlat:51000 exlat:10000 rrt:0 rrl:0 rwt:0 rwl:0 idle_power:- active_power:- ps 4 : mp:0.0055W non-operational enlat:1000000 exlat:100000 rrt:0 rrl:0 rwt:0 rwl:0 idle_power:- active_power:- Signed-off-by: Kai-Heng Feng Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c07d8d4e18c9..903d5813023a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); static int nvme_char_major; module_param(nvme_char_major, int, 0); -static unsigned long default_ps_max_latency_us = 25000; +static unsigned long default_ps_max_latency_us = 100000; module_param(default_ps_max_latency_us, ulong, 0644); MODULE_PARM_DESC(default_ps_max_latency_us, "max power saving latency for new devices; use PM QOS to change per device"); From a41b816c174409417d91b4ceef0145c9f0bef67c Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Wed, 7 Jun 2017 11:36:14 +0800 Subject: [PATCH 12/15] blk-throttle: fix NULL pointer dereference in throtl_schedule_pending_timer I have encountered a NULL pointer dereference in throtl_schedule_pending_timer: [ 413.735396] BUG: unable to handle kernel NULL pointer dereference at 0000000000000038 [ 413.735535] IP: [] throtl_schedule_pending_timer+0x3f/0x210 [ 413.735643] PGD 22c8cf067 PUD 22cb34067 PMD 0 [ 413.735713] Oops: 0000 [#1] SMP ...... This is caused by the following case: blk_throtl_bio throtl_schedule_next_dispatch <= sq is top level one without parent throtl_schedule_pending_timer sq_to_tg(sq)->td->throtl_slice <= sq_to_tg(sq) returns NULL Fix it by using sq_to_td instead of sq_to_tg(sq)->td, which will always return a valid td. 
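For illustration, here is a self-contained userspace sketch of why the two helpers behave differently for the top-level service queue. The struct layouts are reduced to the bare minimum and only approximate the real blk-throttle ones; the point is that the top-level service queue has no parent, so sq_to_tg() returns NULL while sq_to_td() falls back to the enclosing throtl_data.

/* Simplified standalone model of sq_to_tg() vs sq_to_td(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* NULL for the top-level sq */
};

struct throtl_data {
	unsigned int throtl_slice;
	struct throtl_service_queue service_queue;	/* top-level sq */
};

struct throtl_grp {
	struct throtl_service_queue service_queue;
	struct throtl_data *td;
};

/* returns NULL when sq is the top-level service queue */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	return NULL;
}

/* always valid: falls back to the enclosing throtl_data for the top-level sq */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	return tg ? tg->td : container_of(sq, struct throtl_data, service_queue);
}

int main(void)
{
	struct throtl_data td = { .throtl_slice = 100 };
	struct throtl_service_queue *sq = &td.service_queue;	/* no parent */

	/* sq_to_tg(sq)->td would dereference NULL here; sq_to_td(sq) is safe */
	printf("slice = %u\n", sq_to_td(sq)->throtl_slice);
	return 0;
}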
Fixes: 297e3d854784 ("blk-throttle: make throtl_slice tunable")
Signed-off-by: Joseph Qi
Reviewed-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/blk-throttle.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fc13dd0c6e39..3b751f706c61 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -698,7 +698,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 					  unsigned long expires)
 {
-	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 
 	/*
 	 * Since we are adjusting the throttle limit dynamically, the sleep

From 6679a90c4b0dc2563383df1fe0eb170736952a2e Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Tue, 6 Jun 2017 12:40:43 -0700
Subject: [PATCH 13/15] blk-throttle: set default latency baseline for harddisk

Hard disk IO latency varies a lot depending on spindle movement. The
latency range can be from several microseconds to several milliseconds,
so it is pretty hard to get the baseline latency used by io.low.

We will use a different strategy here. The idea is to only use IO that
involves spindle movement to determine whether a cgroup's IO is in a
good state. For HD, if IO latency is small (< 1ms), we ignore the IO.
Such IO is likely sequential IO and does not help determine whether a
cgroup's IO is impacted by other cgroups. With this, we only account IO
with big latency. Then we can choose a hardcoded baseline latency for
HD (4ms, which is a typical IO latency with seek). With all these
settings, the io.low latency works for both HD and SSD.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/blk-throttle.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3b751f706c61..a7285bf2831c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
 #define MIN_THROTL_IOPS (10)
 #define DFL_LATENCY_TARGET (-1L)
 #define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latency comes from sequential IO. Such IO is helpless to
+ * help determine if its IO is impacted by others, hence we ignore the IO
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -212,6 +219,7 @@ struct throtl_data
 	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
 	struct latency_bucket __percpu *latency_buckets;
 	unsigned long last_calculate_time;
+	unsigned long filtered_latency;
 
 	bool track_bio_latency;
 };
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
 	throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
 		bio_op(bio), lat);
 
-	if (tg->latency_target) {
+	if (tg->latency_target && lat >= tg->td->filtered_latency) {
 		int bucket;
 		unsigned int threshold;
 
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
 	struct throtl_data *td;
+	int i;
 
 	td = q->td;
 	BUG_ON(!td);
 
-	if (blk_queue_nonrot(q))
+	if (blk_queue_nonrot(q)) {
 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
-	else
+		td->filtered_latency = LATENCY_FILTERED_SSD;
+	} else {
 		td->throtl_slice = DFL_THROTL_SLICE_HD;
+		td->filtered_latency = LATENCY_FILTERED_HD;
+		for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+			td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+	}
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
 	/* if no low limit, use previous default */
 	td->throtl_slice = DFL_THROTL_SLICE_HD;

From 6460495709aeb651896bc8e5c134b2e4ca7d34a8 Mon Sep 17 00:00:00 2001
From: James Wang
Date: Thu, 8 Jun 2017 14:52:51 +0800
Subject: [PATCH 14/15] Fix loop device flush before configure v3

While installing SLES-12 (based on v4.4), I found that the installer
will stall for 60+ seconds during LVM disk scan. The root cause was
determined to be the removal of a bound device check in loop_flush()
by commit b5dd2f6047ca ("block: loop: improve performance via blk-mq").

Restoring this check, which examines ->lo_state as set by
loop_set_fd(), eliminates the bad behavior.

Test method:
modprobe loop max_loop=64
dd if=/dev/zero of=disk bs=512 count=200K
for((i=0;i<4;i++))do losetup -f disk; done
mkfs.ext4 -F /dev/loop0
for((i=0;i<4;i++))do mkdir t$i; mount /dev/loop$i t$i;done
for f in `ls /dev/loop[0-9]*|sort`; do \
	echo $f; dd if=$f of=/dev/null bs=512 count=1; \
done

Test output:  stock          patched
/dev/loop0    18.1217e-05    8.3842e-05
/dev/loop1    6.1114e-05     0.000147979
/dev/loop10   0.414701       0.000116564
/dev/loop11   0.7474         6.7942e-05
/dev/loop12   0.747986       8.9082e-05
/dev/loop13   0.746532       7.4799e-05
/dev/loop14   0.480041       9.3926e-05
/dev/loop15   1.26453        7.2522e-05

Note that from loop10 onward, the device is not mounted, yet the stock
kernel consumes several orders of magnitude more wall time than it does
for a mounted device. (Thanks to Mike Galbraith for reviewing the
changelog.)
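For illustration only (hypothetical names, not the loop driver itself), a small standalone model of the restored guard: flushing an unbound device becomes a no-op instead of going through the switch path.

/* Simplified userspace model of the loop_flush() bound-state check. */
#include <stdio.h>

enum lo_state { Lo_unbound, Lo_bound, Lo_rundown };

struct toy_loop {
	enum lo_state lo_state;
	unsigned long switches;	/* stands in for work pushed to the worker */
};

/* models loop_switch(lo, NULL): schedules work and waits for completion */
static int toy_switch(struct toy_loop *lo)
{
	lo->switches++;		/* in the real driver this is where time goes */
	return 0;
}

static int toy_flush(struct toy_loop *lo)
{
	/* the restored check: nothing is running for an unbound device */
	if (lo->lo_state != Lo_bound)
		return 0;
	return toy_switch(lo);
}

int main(void)
{
	struct toy_loop bound = { .lo_state = Lo_bound };
	struct toy_loop unbound = { .lo_state = Lo_unbound };

	toy_flush(&bound);
	toy_flush(&unbound);
	printf("bound switches=%lu, unbound switches=%lu\n",
	       bound.switches, unbound.switches);	/* prints 1 and 0 */
	return 0;
}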
Reviewed-by: Hannes Reinecke
Reviewed-by: Ming Lei
Signed-off-by: James Wang
Fixes: b5dd2f6047ca ("block: loop: improve performance via blk-mq")
Signed-off-by: Jens Axboe
---
 drivers/block/loop.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d932906f24..ebbd0c3fe0ed 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -608,6 +608,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
+	/* loop not yet configured, no running thread, nothing to flush */
+	if (lo->lo_state != Lo_bound)
+		return 0;
 	return loop_switch(lo, NULL);
 }
 

From 8f9bebc33dd718283183582fc4a762e178552fb8 Mon Sep 17 00:00:00 2001
From: Paolo Valente
Date: Mon, 5 Jun 2017 10:11:15 +0200
Subject: [PATCH 15/15] block, bfq: access and cache blkg data only when safe

In blk-cgroup, operations on blkg objects are protected with the
request_queue lock. This is no longer the lock that protects
I/O-scheduler operations in blk-mq. In fact, the latter are now
protected with a finer-grained per-scheduler-instance lock. As a
consequence, although blkg lookups are also rcu-protected, blk-mq I/O
schedulers may see inconsistent data when they access blkg and
blkg-related objects. BFQ does access these objects, and does incur
this problem, in the following case.

The blkg_lookup performed in bfq_get_queue, being protected (only)
through rcu, may happen to return the address of a copy of the
original blkg. If this is the case, then the blkg_get performed in
bfq_get_queue, to pin down the blkg, is useless: it does not prevent
blk-cgroup code from destroying both the original blkg and all objects
directly or indirectly referred to by the copy of the blkg. BFQ
accesses these objects, which typically causes a crash from a
NULL-pointer dereference or a memory-protection violation.

Some additional protection mechanism should be added to blk-cgroup to
address this issue. In the meantime, this commit provides a quick
temporary fix for BFQ: cache (when safe) blkg data that might disappear
right after a blkg_lookup.

In particular, this commit exploits the following facts to achieve its
goal without introducing further locks. Destroy operations on a blkg
invoke, as a first step, hooks of the scheduler associated with the
blkg. And these hooks are executed with bfqd->lock held for BFQ. As a
consequence, for any blkg associated with the request queue an instance
of BFQ is attached to, we are guaranteed that such a blkg is not
destroyed, and that all the pointers it contains are consistent, while
that instance is holding its bfqd->lock. A blkg_lookup performed with
bfqd->lock held then returns a fully consistent blkg, which remains
consistent for as long as this lock is held. In more detail, this holds
even if the returned blkg is a copy of the original one.

Finally, also the object describing a group inside BFQ needs to be
protected from destruction on the blkg_free of the original blkg
(which invokes bfq_pd_free). This commit adds private refcounting for
this object, to let it disappear only after no bfq_queue refers to it
any longer.

This commit also removes or updates some stale comments on locking
issues related to blk-cgroup operations.
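For illustration, a standalone sketch of the refcounting scheme described above, with hypothetical names: the group object carries its own counter and a cached blkg path, so it can outlive the blkg that created it and is freed only when the last queue reference is dropped.

/* Userspace model of the split refcounting; not the BFQ code itself. */
#include <stdio.h>
#include <stdlib.h>

struct toy_bfqg {
	int ref;		/* private counter added by this patch */
	char blkg_path[128];	/* cached so it survives the blkg */
};

static struct toy_bfqg *toy_bfqg_alloc(const char *path)
{
	struct toy_bfqg *bfqg = calloc(1, sizeof(*bfqg));

	snprintf(bfqg->blkg_path, sizeof(bfqg->blkg_path), "%s", path);
	bfqg->ref = 1;		/* reference held on behalf of the blkg (pd_alloc) */
	return bfqg;
}

static void toy_bfqg_get(struct toy_bfqg *bfqg)
{
	bfqg->ref++;
}

static void toy_bfqg_put(struct toy_bfqg *bfqg)
{
	if (--bfqg->ref == 0) {
		printf("freeing bfqg (%s)\n", bfqg->blkg_path);
		free(bfqg);
	}
}

int main(void)
{
	struct toy_bfqg *bfqg = toy_bfqg_alloc("/cg/test");

	toy_bfqg_get(bfqg);	/* a bfq_queue still points at this group */
	toy_bfqg_put(bfqg);	/* blkg_free() -> pd_free(): blkg reference dropped */
	printf("still usable: %s\n", bfqg->blkg_path);	/* bfqg survives the blkg */
	toy_bfqg_put(bfqg);	/* last queue reference: now it is freed */
	return 0;
}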
Reported-by: Tomas Konir Reported-by: Lee Tibbert Reported-by: Marco Piazza Signed-off-by: Paolo Valente Tested-by: Tomas Konir Tested-by: Lee Tibbert Tested-by: Marco Piazza Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 116 +++++++++++++++++++++++++++++++++++--------- block/bfq-iosched.c | 2 +- block/bfq-iosched.h | 23 +++++---- 3 files changed, 105 insertions(+), 36 deletions(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index c8a32fb345cf..78b2e0db4fb2 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling) BFQG_FLAG_FNS(empty) #undef BFQG_FLAG_FNS -/* This should be called with the queue_lock held. */ +/* This should be called with the scheduler lock held. */ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) { unsigned long long now; @@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) bfqg_stats_clear_waiting(stats); } -/* This should be called with the queue_lock held. */ +/* This should be called with the scheduler lock held. */ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, struct bfq_group *curr_bfqg) { @@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, bfqg_stats_mark_waiting(stats); } -/* This should be called with the queue_lock held. */ +/* This should be called with the scheduler lock held. */ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { unsigned long long now; @@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq) static void bfqg_get(struct bfq_group *bfqg) { - return blkg_get(bfqg_to_blkg(bfqg)); + bfqg->ref++; } void bfqg_put(struct bfq_group *bfqg) { - return blkg_put(bfqg_to_blkg(bfqg)); + bfqg->ref--; + + if (bfqg->ref == 0) + kfree(bfqg); +} + +static void bfqg_and_blkg_get(struct bfq_group *bfqg) +{ + /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ + bfqg_get(bfqg); + + blkg_get(bfqg_to_blkg(bfqg)); +} + +void bfqg_and_blkg_put(struct bfq_group *bfqg) +{ + bfqg_put(bfqg); + + blkg_put(bfqg_to_blkg(bfqg)); } void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, @@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) if (bfqq) { bfqq->ioprio = bfqq->new_ioprio; bfqq->ioprio_class = bfqq->new_ioprio_class; - bfqg_get(bfqg); + /* + * Make sure that bfqg and its associated blkg do not + * disappear before entity. + */ + bfqg_and_blkg_get(bfqg); } entity->parent = bfqg->my_entity; /* NULL for root group */ entity->sched_data = &bfqg->sched_data; @@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) return NULL; } + /* see comments in bfq_bic_update_cgroup for why refcounting */ + bfqg_get(bfqg); return &bfqg->pd; } @@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd) struct bfq_group *bfqg = pd_to_bfqg(pd); bfqg_stats_exit(&bfqg->stats); - return kfree(bfqg); + bfqg_put(bfqg); } void bfq_pd_reset_stats(struct blkg_policy_data *pd) @@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, * Move @bfqq to @bfqg, deactivating it from its old group and reactivating * it on the new one. Avoid putting the entity on the old group idle tree. * - * Must be called under the queue lock; the cgroup owning @bfqg must - * not disappear (by now this just means that we are called under - * rcu_read_lock()). 
+ * Must be called under the scheduler lock, to make sure that the blkg + * owning @bfqg does not disappear (see comments in + * bfq_bic_update_cgroup on guaranteeing the consistency of blkg + * objects). */ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct bfq_group *bfqg) @@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_deactivate_bfqq(bfqd, bfqq, false, false); else if (entity->on_st) bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); - bfqg_put(bfqq_group(bfqq)); + bfqg_and_blkg_put(bfqq_group(bfqq)); - /* - * Here we use a reference to bfqg. We don't need a refcounter - * as the cgroup reference will not be dropped, so that its - * destroy() callback will not be invoked. - */ entity->parent = bfqg->my_entity; entity->sched_data = &bfqg->sched_data; - bfqg_get(bfqg); + /* pin down bfqg and its associated blkg */ + bfqg_and_blkg_get(bfqg); if (bfq_bfqq_busy(bfqq)) { bfq_pos_tree_add_move(bfqd, bfqq); @@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, * @bic: the bic to move. * @blkcg: the blk-cgroup to move to. * - * Move bic to blkcg, assuming that bfqd->queue is locked; the caller - * has to make sure that the reference to cgroup is valid across the call. + * Move bic to blkcg, assuming that bfqd->lock is held; which makes + * sure that the reference to cgroup is valid across the call (see + * comments in bfq_bic_update_cgroup on this issue) * * NOTE: an alternative approach might have been to store the current * cgroup in bfqq and getting a reference to it, reducing the lookup @@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) goto out; bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); + /* + * Update blkg_path for bfq_log_* functions. We cache this + * path, and update it here, for the following + * reasons. Operations on blkg objects in blk-cgroup are + * protected with the request_queue lock, and not with the + * lock that protects the instances of this scheduler + * (bfqd->lock). This exposes BFQ to the following sort of + * race. + * + * The blkg_lookup performed in bfq_get_queue, protected + * through rcu, may happen to return the address of a copy of + * the original blkg. If this is the case, then the + * bfqg_and_blkg_get performed in bfq_get_queue, to pin down + * the blkg, is useless: it does not prevent blk-cgroup code + * from destroying both the original blkg and all objects + * directly or indirectly referred by the copy of the + * blkg. + * + * On the bright side, destroy operations on a blkg invoke, as + * a first step, hooks of the scheduler associated with the + * blkg. And these hooks are executed with bfqd->lock held for + * BFQ. As a consequence, for any blkg associated with the + * request queue this instance of the scheduler is attached + * to, we are guaranteed that such a blkg is not destroyed, and + * that all the pointers it contains are consistent, while we + * are holding bfqd->lock. A blkg_lookup performed with + * bfqd->lock held then returns a fully consistent blkg, which + * remains consistent until this lock is held. + * + * Thanks to the last fact, and to the fact that: (1) bfqg has + * been obtained through a blkg_lookup in the above + * assignment, and (2) bfqd->lock is being held, here we can + * safely use the policy data for the involved blkg (i.e., the + * field bfqg->pd) to get to the blkg associated with bfqg, + * and then we can safely use any field of blkg. 
After we + * release bfqd->lock, even just getting blkg through this + * bfqg may cause dangling references to be traversed, as + * bfqg->pd may not exist any more. + * + * In view of the above facts, here we cache, in the bfqg, any + * blkg data we may need for this bic, and for its associated + * bfq_queue. As of now, we need to cache only the path of the + * blkg, which is used in the bfq_log_* functions. + * + * Finally, note that bfqg itself needs to be protected from + * destruction on the blkg_free of the original blkg (which + * invokes bfq_pd_free). We use an additional private + * refcounter for bfqg, to let it disappear only after no + * bfq_queue refers to it any longer. + */ + blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path)); bic->blkcg_serial_nr = serial_nr; out: rcu_read_unlock(); @@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, * @bfqd: the device data structure with the root group. * @bfqg: the group to move from. * @st: the service tree with the entities. - * - * Needs queue_lock to be taken and reference to be valid over the call. */ static void bfq_reparent_active_entities(struct bfq_data *bfqd, struct bfq_group *bfqg, @@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd) /* * The idle tree may still contain bfq_queues belonging * to exited task because they never migrated to a different - * cgroup from the one being destroyed now. No one else - * can access them so it's safe to act without any lock. + * cgroup from the one being destroyed now. */ bfq_flush_idle_tree(st); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 08ce45096350..ed93da2462ab 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq) kmem_cache_free(bfq_pool, bfqq); #ifdef CONFIG_BFQ_GROUP_IOSCHED - bfqg_put(bfqg); + bfqg_and_blkg_put(bfqg); #endif } diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index ae783c06dfd9..5c3bf9861492 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -759,6 +759,12 @@ struct bfq_group { /* must be the first member */ struct blkg_policy_data pd; + /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */ + char blkg_path[128]; + + /* reference counter (see comments in bfq_bic_update_cgroup) */ + int ref; + struct bfq_entity entity; struct bfq_sched_data sched_data; @@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); -void bfqg_put(struct bfq_group *bfqg); +void bfqg_and_blkg_put(struct bfq_group *bfqg); #ifdef CONFIG_BFQ_GROUP_IOSCHED extern struct cftype bfq_blkcg_legacy_files[]; @@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ - char __pbuf[128]; \ - \ - blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ - blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \ + blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ - __pbuf, ##args); \ + bfqq_group(bfqq)->blkg_path, ##args); \ } while (0) -#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) 
do { \ - char __pbuf[128]; \ - \ - blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ - blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \ -} while (0) +#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \ + blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args) #else /* CONFIG_BFQ_GROUP_IOSCHED */