From 072792dcdfc8d5f91a26050e5665285f50afebf5 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 06:14:16 -0400
Subject: [PATCH 01/15] dm cache: fix incorrect 'idle_time' reset in IO tracker

Some bios have no payload (e.g. a FLUSH), so don't reset the idle_time
when these come in.

Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-target.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1db375f50a13..0760ba409c21 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
 
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
 {
+	if (!len)
+		return;
+
 	iot->in_flight -= len;
 	if (!iot->in_flight)
 		iot->idle_time = jiffies;

From a8cd1eba6135e086109e2b94bf96deb17456ede8 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 05:07:34 -0400
Subject: [PATCH 02/15] dm cache policy smq: only demote entries in bottom half of the clean multiqueue

Heavy IO load may mean there are very few clean blocks in the cache,
and we risk demoting entries that get hit a lot.

Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-policy-smq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 72479bd61e11..a177559f2049 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1190,7 +1190,7 @@ static void queue_demotion(struct smq_policy *mq)
 	if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
 		return;
 
-	e = q_peek(&mq->clean, mq->clean.nr_levels, true);
+	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
 	if (!e) {
 		if (!clean_target_met(mq, false))
 			queue_writeback(mq);

From 78c45607b909fb384c47c134d89b39285a6a8b45 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 05:09:38 -0400
Subject: [PATCH 03/15] dm cache policy smq: be more aggressive about triggering a writeback

If there are no clean entries to demote we really want to write back
immediately.

Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-policy-smq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index a177559f2049..5aa8f43856c5 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1192,7 +1192,7 @@ static void queue_demotion(struct smq_policy *mq)
 
 	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
 	if (!e) {
-		if (!clean_target_met(mq, false))
+		if (!clean_target_met(mq, true))
 			queue_writeback(mq);
 		return;
 	}

From 4d44ec5ab751be63c5d348f13294304d87baa8c3 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 05:11:06 -0400
Subject: [PATCH 04/15] dm cache policy smq: put newly promoted entries at the top of the multiqueue

This stops entries from bouncing in and out of the cache quickly.
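
The effect can be seen with a toy multiqueue model (illustrative only;
the names and shapes below are invented, not the smq code). Demotion
hunts from the coldest level up, so an entry pushed in at the top level
is the last candidate it can find, while one pushed in at level 0 could
be demoted again straight away:

	#include <stddef.h>

	#define NR_LEVELS 64

	struct entry {
		struct entry *next;
	};

	struct multiqueue {
		struct entry *level[NR_LEVELS];	/* 0 = coldest */
	};

	/* Scan bottom-up for the next demotion candidate. */
	static struct entry *demote_candidate(struct multiqueue *q)
	{
		unsigned l;

		for (l = 0; l < NR_LEVELS; l++)
			if (q->level[l])
				return q->level[l];
		return NULL;
	}
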
Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-policy-smq.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 5aa8f43856c5..54421a846a0c 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1452,6 +1452,7 @@ static void __complete_background_work(struct smq_policy *mq,
 		clear_pending(mq, e);
 		if (success) {
 			e->oblock = work->oblock;
+			e->level = NR_CACHE_LEVELS - 1;
 			push(mq, e);
 			// h, q, a
 		} else {

From 6cf4cc8f8b3b7bc9e3c04a7eab44b985d50029fc Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 07:48:18 -0400
Subject: [PATCH 05/15] dm cache policy smq: stop preemptively demoting blocks

It causes a lot of churn if the working set's size is close to the
fast device's size.

Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-policy-smq.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 54421a846a0c..758480a1893d 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1134,13 +1134,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		percent_to_target(mq, CLEAN_TARGET);
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;
 
-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
@@ -1220,7 +1217,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 	 * We always claim to be 'idle' to ensure some demotions happen
 	 * with continuous loads.
 	 */
-	if (!free_target_met(mq, true))
+	if (!free_target_met(mq))
 		queue_demotion(mq);
 	return;
 }
@@ -1421,14 +1418,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
+		if (!clean_target_met(mq, idle)) {
 			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);

From 701e03e4e180f0cd97d4139a32e2b2d879d12da2 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 08:22:31 -0400
Subject: [PATCH 06/15] dm cache: track all IO to the cache rather than just the origin device's IO

IO tracking is used to throttle writebacks when the origin device is
busy. Even if all the IO is going to the fast device, writebacks can
significantly degrade performance. So track all IO to gauge whether
the cache is busy or not. Otherwise, synthetic IO tests (e.g. fio)
that might send all IO to the fast device wouldn't cause writebacks
to get throttled.
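
With that change the tracker's idle test reflects the whole cache.
Conceptually it reduces to the following (a simplified standalone
model with locking omitted and field names assumed; not the in-tree
io_tracker):

	/* 'in_flight' counts accounted sectors; 'idle_time' records
	 * when the last of them completed.
	 */
	struct tracker_model {
		unsigned long in_flight;
		unsigned long idle_time;
	};

	/* Idle iff nothing is in flight and the last completion was
	 * at least 'j' jiffies ago.
	 */
	static int model_idle_for(const struct tracker_model *t,
				  unsigned long now, unsigned long j)
	{
		return !t->in_flight && now - t->idle_time >= j;
	}
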
Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-target.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0760ba409c21..232078e48167 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -477,7 +477,7 @@ struct cache {
 	spinlock_t invalidation_lock;
 	struct list_head invalidation_requests;
 
-	struct io_tracker origin_tracker;
+	struct io_tracker tracker;
 
 	struct work_struct commit_ws;
 	struct batcher committer;
@@ -904,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-		bio_op(bio) != REQ_OP_DISCARD);
+	return bio_op(bio) != REQ_OP_DISCARD;
 }
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -915,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 
 	if (accountable_bio(cache, bio)) {
 		pb->len = bio_sectors(bio);
-		iot_io_begin(&cache->origin_tracker, pb->len);
+		iot_io_begin(&cache->tracker, pb->len);
 	}
 }
 
@@ -924,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	iot_io_end(&cache->origin_tracker, pb->len);
+	iot_io_end(&cache->tracker, pb->len);
 }
 
 static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1725,7 +1724,7 @@ enum busy {
 
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-	bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+	bool idle = iot_idle_for(&cache->tracker, HZ);
 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 		cache->sectors_per_block;
 
@@ -2720,7 +2719,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	batcher_init(&cache->committer, commit_op, cache,
 		     issue_op, cache, cache->wq);
-	iot_init(&cache->origin_tracker);
+	iot_init(&cache->tracker);
 
 	init_rwsem(&cache->background_work_lock);
 	prevent_background_work(cache);
@@ -2944,7 +2943,7 @@ static void cache_postsuspend(struct dm_target *ti)
 
 	cancel_delayed_work(&cache->waker);
 	flush_workqueue(cache->wq);
-	WARN_ON(cache->origin_tracker.in_flight);
+	WARN_ON(cache->tracker.in_flight);
 
 	/*
 	 * If it's a flush suspend there won't be any deferred bios, so this

From 49b7f768900f4084a65c3689d955b2fceac39e53 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 09:07:16 -0400
Subject: [PATCH 07/15] dm cache: simplify the IDLE vs BUSY state calculation

Drop the MODERATE state since it wasn't buying us much.

Also, in check_migrations(), prepare for the next commit ("dm cache
policy smq: don't do any writebacks unless IDLE") by deferring to the
policy to make the final decision on whether writebacks can be
serviced.
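
After this the calculation collapses to a two-way decision. As a toy
model (names invented, not the in-tree code):

	enum busy { IDLE, BUSY };

	/* IDLE only when the cache has seen no accounted IO for a
	 * while AND migration traffic is under the threshold; the
	 * old MODERATE answers fold into these two states.
	 */
	static enum busy model_bandwidth(int quiet, unsigned long volume,
					 unsigned long threshold)
	{
		return (quiet && volume <= threshold) ? IDLE : BUSY;
	}
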
Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-target.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 232078e48167..d682a0511381 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1718,7 +1718,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 
 enum busy {
 	IDLE,
-	MODERATE,
 	BUSY
 };
 
@@ -1728,10 +1727,10 @@ static enum busy spare_migration_bandwidth(struct cache *cache)
 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 		cache->sectors_per_block;
 
-	if (current_volume <= cache->migration_threshold)
-		return idle ? IDLE : MODERATE;
+	if (idle && current_volume <= cache->migration_threshold)
+		return IDLE;
 	else
-		return idle ? MODERATE : BUSY;
+		return BUSY;
 }
 
 static void inc_hit_counter(struct cache *cache, struct bio *bio)
 {
@@ -2047,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
 
 	for (;;) {
 		b = spare_migration_bandwidth(cache);
-		if (b == BUSY)
-			break;
 
 		r = policy_get_background_work(cache->policy, b == IDLE, &op);
 		if (r == -ENODATA)

From 2e63309507c818e8b631a03f02c363031c007fb7 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Thu, 11 May 2017 09:09:04 -0400
Subject: [PATCH 08/15] dm cache policy smq: don't do any writebacks unless IDLE

If there are no clean blocks to be demoted, the writeback will be
triggered at that point. Preemptively writing back can hurt
performance under high IO load.

Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-policy-smq.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 758480a1893d..e5eb9c9b4bc8 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 	 * Cache entries may not be populated.  So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	unsigned nr_clean;
-
 	if (idle) {
 		/*
 		 * We'd like to clean everything.
@@ -1129,9 +1127,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		return q_size(&mq->dirty) == 0u;
 	}
 
-	nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
-	return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-		percent_to_target(mq, CLEAN_TARGET);
+	/*
+	 * If we're busy we don't worry about cleaning at all.
+	 */
+	return true;
 }
 
 static bool free_target_met(struct smq_policy *mq)

From 91bcdb92d39711d1adb40c26b653b7978d93eb98 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Mon, 15 May 2017 09:43:05 -0400
Subject: [PATCH 09/15] dm thin metadata: call precommit before saving the roots

These calls were the wrong way round in __write_initial_superblock.
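
Why the order matters, in outline (a hypothetical model of the call
ordering, not the dm-thin-metadata API itself): the pre-commit step
flushes outstanding space-map changes and can still move the space-map
roots, so the roots must only be captured after it has run.

	struct pool_model;

	int pre_commit(struct pool_model *pmd); /* flushes space maps, may move roots */
	int save_roots(struct pool_model *pmd); /* snapshots roots for the superblock */

	static int write_initial_superblock_model(struct pool_model *pmd)
	{
		int r;

		r = pre_commit(pmd);	/* must run first */
		if (r < 0)
			return r;

		return save_roots(pmd);	/* roots are stable now */
	}
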
Cc: stable@vger.kernel.org
Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-thin-metadata.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 0f0251d0d337..d31d18d9727c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -484,11 +484,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
 	if (r < 0)
 		return r;
 
-	r = save_sm_roots(pmd);
+	r = dm_tm_pre_commit(pmd->tm);
 	if (r < 0)
 		return r;
 
-	r = dm_tm_pre_commit(pmd->tm);
+	r = save_sm_roots(pmd);
 	if (r < 0)
 		return r;
 

From 0377a07c7a035e0d033cd8b29f0cb15244c0916a Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Mon, 15 May 2017 09:45:40 -0400
Subject: [PATCH 10/15] dm space map disk: fix some bookkeeping in the disk space map

When decrementing the reference count for a block, the free count
wasn't being updated if the reference count went to zero.

Cc: stable@vger.kernel.org
Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/persistent-data/dm-space-map-disk.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a14325..32adf6b4a9c7 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
 
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+	int r;
+	uint32_t old_count;
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	return sm_ll_dec(&smd->ll, b, &ev);
+	r = sm_ll_dec(&smd->ll, b, &ev);
+	if (!r && (ev == SM_FREE)) {
+		/*
+		 * It's only free if it's also free in the last
+		 * transaction.
+		 */
+		r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+		if (!r && !old_count)
+			smd->nr_allocated_this_transaction--;
+	}
+
+	return r;
 }
 
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)

From ece0728037b15f4d31198f12b359104bcb5db4c8 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 15 May 2017 17:28:36 +0200
Subject: [PATCH 11/15] dm rq: add a missing break to map_request

We don't want to BUG() when receiving a DM_MAPIO_KILL value.

Fixes: 412445ac ("dm: introduce a new DM_MAPIO_KILL return value")
Signed-off-by: Christoph Hellwig
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-rq.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 2af27026aa2e..b639fa7246ee 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio)
 	case DM_MAPIO_KILL:
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, -EIO);
+		break;
 	default:
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();

From 18a482f5245cc875755090853e84283512b3e6bd Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 15 May 2017 17:28:37 +0200
Subject: [PATCH 12/15] dm mpath: don't return -EIO from dm_report_EIO

Instead just turn the macro into a helper for the warning message.
This removes an unnecessary assignment and will allow the next commit
to fix a place where -EIO is the wrong return value.
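
The language mechanics, reduced to a minimal standalone sketch
(log_eio() is a stand-in for the pr_debug() call; this is not the
mpath code itself):

	#include <errno.h>

	void log_eio(void *m);

	/* A GNU C statement expression evaluates to its last
	 * expression, so the old macro could be used as a value:
	 * "return OLD_REPORT(m);".
	 */
	#define OLD_REPORT(m) \
	({ \
		log_eio(m); \
		-EIO; \
	})

	/* A do/while (0) body has no value, so every caller must now
	 * pick its return code explicitly, as the hunks below do.
	 */
	#define NEW_REPORT(m) \
	do { \
		log_eio(m); \
	} while (0)
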
Signed-off-by: Christoph Hellwig
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-mpath.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 926a6bcb32c8..d55454f98b59 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -447,7 +447,7 @@ failed:
  * it has been invoked.
  */
 #define dm_report_EIO(m)						\
-({									\
+do {									\
 	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
 									\
 	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
@@ -455,8 +455,7 @@ failed:
 		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
 		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
 		 dm_noflush_suspending((m)->ti));			\
-	-EIO;								\
-})
+} while (0)
 
 /*
  * Map cloned requests (request-based multipath)
@@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	if (!pgpath) {
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_DELAY_REQUEUE;
-		return dm_report_EIO(m);	/* Failed */
+		dm_report_EIO(m);	/* Failed */
+		return -EIO;
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 		if (pg_init_all_paths(m))
@@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	if (!pgpath) {
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_REQUEUE;
-		return dm_report_EIO(m);
+		dm_report_EIO(m);
+		return -EIO;
 	}
 
 	mpio->pgpath = pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 		if (error == -EIO)
-			error = dm_report_EIO(m);
+			dm_report_EIO(m);
 		/* complete with the original error */
 		r = DM_ENDIO_DONE;
 	}
@@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 		fail_path(mpio->pgpath);
 
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
-	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-		return dm_report_EIO(m);
+	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+		dm_report_EIO(m);
+		return -EIO;
+	}
 
 	/* Queue for the daemon to resubmit */
 	dm_bio_restore(get_bio_details_from_bio(clone), clone);

From f98e0eb68008aff9824d1c4dad7276c8bab83ca5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 15 May 2017 17:28:38 +0200
Subject: [PATCH 13/15] dm mpath: multipath_clone_and_map must not return -EIO

Since 412445ac ("dm: introduce a new DM_MAPIO_KILL return value"), the
clone_and_map_rq methods must not return errno values, so fix it up to
properly return DM_MAPIO_KILL, instead of the -EIO value that snuck in
due to a conflict between two patches.
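
Why a raw errno is fatal here, as a toy model of the dispatcher
(names and values invented; not the dm-rq code):

	#include <stdlib.h>

	/* clone_and_map results form a small set of dispatch codes;
	 * an errno like -EIO matches none of them and falls through
	 * to the default case, which models BUG().
	 */
	enum { MAPIO_SUBMITTED, MAPIO_REMAPPED, MAPIO_REQUEUE,
	       MAPIO_DELAY_REQUEUE, MAPIO_KILL };

	void kill_request(void);

	static void dispatch(int r)
	{
		switch (r) {
		case MAPIO_SUBMITTED:
		case MAPIO_REMAPPED:
		case MAPIO_REQUEUE:
		case MAPIO_DELAY_REQUEUE:
			break;		/* handled elsewhere */
		case MAPIO_KILL:
			kill_request();
			break;
		default:
			abort();	/* -EIO would land here */
		}
	}
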
Signed-off-by: Christoph Hellwig
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-mpath.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d55454f98b59..3df056b73b66 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -481,7 +481,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_DELAY_REQUEUE;
 		dm_report_EIO(m);	/* Failed */
-		return -EIO;
+		return DM_MAPIO_KILL;
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 		if (pg_init_all_paths(m))

From 13840d38016203f0095cd547b90352812d24b787 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 30 Apr 2017 17:32:28 -0400
Subject: [PATCH 14/15] dm bufio: make the parameter "retain_bytes" unsigned long

Change the type of the parameter "retain_bytes" from unsigned to
unsigned long, so that on 64-bit machines the user can set more than
4GiB of data to be retained.

Also, change the type of the variable "count" in the function
"__evict_old_buffers" to unsigned long. The assignment
"count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];"
could result in an unsigned long to unsigned overflow, and that could
result in buffers not being freed when they should be.

While at it, avoid division in get_retain_buffers(). Division is
slow; we can change it to a shift because we have precalculated the
log2 of the block size.

Cc: stable@vger.kernel.org
Signed-off-by: Mikulas Patocka
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-bufio.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5db11a405129..cd8139593ccd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock);
  * Buffers are freed after this timeout
  */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 
 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 	return true;
 }
 
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-	unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-	return retain_bytes / c->block_size;
+	unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
 
 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
 	unsigned long count = nr_to_scan;
-	unsigned retain_target = get_retain_buffers(c);
+	unsigned long retain_target = get_retain_buffers(c);
 
 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
 	struct dm_buffer *b, *tmp;
-	unsigned retain_target = get_retain_buffers(c);
-	unsigned count;
+	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long count;
 	LIST_HEAD(write_list);
 
 	dm_bufio_lock(c);
 
@@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
 
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
 
 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);

From 7e1b9521f5a8356553f5e58b07952bf346632ea4 Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Sat, 11 Mar 2017 19:09:45 +0000
Subject: [PATCH 15/15] dm cache: handle kmalloc failure allocating background_tracker struct

Currently there is no kmalloc failure check on the allocation of the
background_tracker struct in btracker_create(), and so a NULL return
will lead to a NULL pointer dereference. Add a NULL check.

Detected by CoverityScan, CID#1416587 ("Dereference null return value")

Fixes: b29d4986d ("dm cache: significant rework to leverage dm-bio-prison-v2")
Signed-off-by: Colin Ian King
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-background-tracker.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 9b1afdfb13f0..707233891291 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work)
 {
 	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
 
+	if (!b) {
+		DMERR("couldn't create background_tracker");
+		return NULL;
+	}
+
 	b->max_work = max_work;
 	atomic_set(&b->pending_promotes, 0);
 	atomic_set(&b->pending_writebacks, 0);