// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;

	/* Periodic reclaim work and its workqueue */
	struct delayed_work	work;
	struct workqueue_struct	*wq;

	/* kcopyd client used for zone copies, its throttle and last error */
	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	/* Index of the device this reclaim instance operates on */
	int			dev_idx;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	/* A kcopyd zone copy is in flight (used as a wait bit) */
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD			(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_ZONES	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_ZONES	50

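/*
 * Example (illustrative): with 100 cache zones of which 20 are unmapped
 * (20%), dmz_should_reclaim() triggers reclaim even while the target is
 * busy, since 20 <= DMZ_RECLAIM_LOW_UNMAP_ZONES. An idle target is
 * reclaimed regardless of these thresholds, as long as there are zones
 * left to reclaim.
 */
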
/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zone->dev;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zeroout the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    zone->id, (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}

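/*
 * Note: DMZ_RECLAIM_KCOPY is used as a wait bit. dmz_reclaim_copy() sets it
 * before calling dm_kcopyd_copy() and sleeps in wait_on_bit_io() until the
 * clear_bit_unlock()/wake_up_bit() pair above signals completion of the copy.
 */
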
/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dmz_zone_nr_blocks(zmd);
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		flags |= BIT(DM_KCOPYD_WRITE_SEQ);

	while (block < end_block) {
		if (src_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;
		if (dst_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		if (dmz_reclaim_should_terminate(src_zone))
			return -EINTR;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential, so zero out any hole
		 * between writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = src_zone->dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dst_zone->dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}

/*
 * Move valid blocks of dzone buffer zone into dzone (after its write pointer)
 * and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	DMDEBUG("(%s/%u): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		dzone->chunk, bzone->id, dmz_weight(bzone),
		dzone->id, dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	DMDEBUG("(%s/%u): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		chunk, dzone->id, dmz_weight(dzone),
		bzone->id, dmz_weight(bzone));

	/* Flush the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;
	int alloc_flags = DMZ_ALLOC_SEQ;

	/* Get a free random or sequential zone */
	dmz_lock_map(zmd);
again:
	szone = dmz_alloc_zone(zmd, zrc->dev_idx,
			       alloc_flags | DMZ_ALLOC_RECLAIM);
	if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
		/* No free sequential zone: fall back to a random zone */
		alloc_flags = DMZ_ALLOC_RND;
		goto again;
	}
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	DMDEBUG("(%s/%u): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
		dmz_metadata_label(zmd), zrc->dev_idx, chunk,
		dmz_is_cache(dzone) ? "cache" : "rnd",
		dzone->id, dmz_weight(dzone),
		dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Test if the target device is idle, i.e. if no BIO has been accounted
 * (zrc->atime updated by dmz_reclaim_bio_acc()) for more than
 * DMZ_IDLE_PERIOD.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}

/*
 * Find a candidate zone for reclaim and process it: an empty cache/random
 * data zone is simply unmapped, a used cache/random zone has its valid
 * blocks moved to a newly allocated zone, and a sequential data zone is
 * merged with its buffer zone (the buffer zone is folded into the data zone
 * when its valid blocks all lie after the data zone write pointer, otherwise
 * the data zone is merged into the buffer zone).
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd, zrc->dev_idx,
					 dmz_target_idle(zrc));
	if (!dzone) {
		DMDEBUG("(%s/%u): No zone found to reclaim",
			dmz_metadata_label(zmd), zrc->dev_idx);
		return -EBUSY;
	}
	rzone = dzone;

	start = jiffies;
	if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;
		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
		}
	}
out:
	if (ret) {
		if (ret == -EINTR)
			DMDEBUG("(%s/%u): reclaim zone %u interrupted",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id);
		else
			DMDEBUG("(%s/%u): Failed to reclaim zone %u, err %d",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id, ret);
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		DMDEBUG("(%s/%u): Metadata flush for zone %u failed, err %d",
			dmz_metadata_label(zmd), zrc->dev_idx, rzone->id, ret);
		return ret;
	}

	DMDEBUG("(%s/%u): Reclaimed zone %u in %u ms",
		dmz_metadata_label(zmd), zrc->dev_idx,
		rzone->id, jiffies_to_msecs(jiffies - start));

	return 0;
}

/*
 * Percentage of unmapped (free) zones usable for reclaim.
 */
static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_cache = dmz_nr_cache_zones(zmd);
	unsigned int nr_unmap, nr_zones;

	if (nr_cache) {
		nr_zones = nr_cache;
		nr_unmap = dmz_nr_unmap_cache_zones(zmd);
	} else {
		nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
		nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
	}
	if (nr_unmap <= 1)
		return 0;
	return nr_unmap * 100 / nr_zones;
}

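/*
 * Example (illustrative): when cache zones are configured, the percentage is
 * computed over cache zones only, otherwise over the random zones of this
 * reclaim instance's device. With 128 cache zones of which 32 are unmapped,
 * this returns 32 * 100 / 128 = 25.
 */
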
/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
	unsigned int nr_reclaim;

	nr_reclaim = dmz_nr_rnd_zones(zrc->metadata, zrc->dev_idx);

	if (dmz_nr_cache_zones(zrc->metadata)) {
		/*
		 * The first device in a multi-device
		 * setup only contains cache zones, so
		 * never start reclaim there.
		 */
		if (zrc->dev_idx == 0)
			return false;
		nr_reclaim += dmz_nr_cache_zones(zrc->metadata);
	}

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_reclaim)
		return true;

	/* If there are still plenty of cache zones, do not reclaim */
	if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
		return false;

	/*
	 * If the percentage of unmapped cache zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int p_unmap;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return;

	p_unmap = dmz_reclaim_percentage(zrc);
	if (!dmz_should_reclaim(zrc, p_unmap)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on random zones, and
	 * slower if there are still some free random zones, to limit the
	 * impact on the user workload as much as possible.
	 */
	if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some random zone: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
	}
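
	/*
	 * Example (illustrative): a busy target with p_unmap == 20 gets
	 * min(75, 100 - 20 / 2) == 75, i.e. kcopyd is limited to roughly
	 * three quarters of its bandwidth, while an idle target copies at
	 * full speed (throttle == 100).
	 */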

	DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		zrc->kc_throttle.throttle,
		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
		p_unmap, dmz_nr_unmap_cache_zones(zmd),
		dmz_nr_cache_zones(zmd),
		dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx),
		dmz_nr_rnd_zones(zmd, zrc->dev_idx));

	ret = dmz_do_reclaim(zrc);
	if (ret && ret != -EINTR) {
		if (!dmz_check_dev(zmd))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim, int idx)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->metadata = zmd;
	zrc->atime = jiffies;
	zrc->dev_idx = idx;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM,
					  dmz_metadata_label(zmd), idx);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	unsigned int p_unmap = dmz_reclaim_percentage(zrc);

	if (dmz_should_reclaim(zrc, p_unmap))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}