/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/
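
/*
 * Round n up to a power of two, but never below min.  For example,
 * next_power(1000, 16) == 1024 and next_power(10, 16) == 16; it is used
 * below to size the hash table.
 */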
static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds for switching between random and sequential io modes
 * default as follows and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512

enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}
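
/*
 * With the defaults above the tracker starts in random mode; it takes 512
 * consecutive sequential samples to flip to sequential mode, and only a
 * handful of random samples (4 by default, give or take resets caused by
 * intervening sequential io) to flip back.  map() below bypasses the cache
 * while the tracker reports PATTERN_SEQUENTIAL, unless sequential_threshold
 * has been set to 0.
 */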

/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u
#define NR_SENTINELS NR_QUEUE_LEVELS * 3

#define WRITEBACK_PERIOD HZ

struct queue {
	unsigned nr_elts;
	bool current_writeback_sentinels;
	unsigned long next_writeback;
	struct list_head qs[NR_QUEUE_LEVELS];
	struct list_head sentinels[NR_SENTINELS];
};
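
/*
 * The sentinel array holds three sets of NR_QUEUE_LEVELS sentinels: the
 * first set is used by queue_tick() to mark the per-tick boundary in each
 * level, and the remaining two sets are the alternating writeback
 * sentinels managed by queue_update_writeback_sentinels() below.
 */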

static void queue_init(struct queue *q)
{
	unsigned i;

	q->nr_elts = 0;
	q->current_writeback_sentinels = false;
	q->next_writeback = 0;
	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		INIT_LIST_HEAD(q->qs + i);
		INIT_LIST_HEAD(q->sentinels + i);
		INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
		INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
	}
}

static unsigned queue_size(struct queue *q)
{
	return q->nr_elts;
}

static bool queue_empty(struct queue *q)
{
	return q->nr_elts == 0;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	q->nr_elts++;
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct queue *q, struct list_head *elt)
{
	q->nr_elts--;
	list_del(elt);
}

static bool is_sentinel(struct queue *q, struct list_head *h)
{
	return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_peek(struct queue *q)
{
	unsigned level;
	struct list_head *h;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level)
			if (!is_sentinel(q, h))
				return h;

	return NULL;
}

static struct list_head *queue_pop(struct queue *q)
{
	struct list_head *r = queue_peek(q);

	if (r) {
		q->nr_elts--;
		list_del(r);
	}

	return r;
}

/*
 * Pops an entry from a level that is not past a sentinel.
 */
static struct list_head *queue_pop_old(struct queue *q)
{
	unsigned level;
	struct list_head *h;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level) {
			if (is_sentinel(q, h))
				break;

			q->nr_elts--;
			list_del(h);
			return h;
		}

	return NULL;
}

static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}
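
/*
 * Writeback sentinels: two alternating sets are kept per level.  Every
 * WRITEBACK_PERIOD the inactive set is pushed to the back of each level and
 * the sets are swapped.  Entries that have sat in the dirty queue long
 * enough end up in front of a sentinel and so are returned by
 * queue_pop_old(), which is how __mq_writeback_work() finds "old" dirty
 * blocks to write back.
 */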

static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
{
	if (q->current_writeback_sentinels)
		return q->sentinels + NR_QUEUE_LEVELS + level;
	else
		return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
}

static void queue_update_writeback_sentinels(struct queue *q)
{
	unsigned i;
	struct list_head *h;

	if (time_after(jiffies, q->next_writeback)) {
		for (i = 0; i < NR_QUEUE_LEVELS; i++) {
			h = writeback_sentinel(q, i);
			list_del(h);
			list_add_tail(h, q->qs + i);
		}

		q->next_writeback = jiffies + WRITEBACK_PERIOD;
		q->current_writeback_sentinels = !q->current_writeback_sentinels;
	}
}

/*
 * Sometimes we want to iterate through entries that have been pushed since
 * a certain event.  We use sentinel entries on the queues to delimit these
 * 'tick' events.
 */
static void queue_tick(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		list_del(q->sentinels + i);
		list_add_tail(q->sentinels + i, q->qs + i);
	}
}

typedef void (*iter_fn)(struct list_head *, void *);
static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
{
	unsigned i;
	struct list_head *h;

	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		list_for_each_prev(h, q->qs + i) {
			if (is_sentinel(q, h))
				break;

			fn(h, context);
		}
	}
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}

static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del_init(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}

/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}
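
/*
 * For example, the entry at ep->entries + 5 in the cache pool always
 * corresponds to cblock 5, so infer_cblock() is just pointer arithmetic and
 * alloc_particular_entry() can reserve a specific cblock when reloading
 * mappings.  (The pre_cache pool is the same size, but its indices are
 * never interpreted as cblocks.)
 */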

/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and dirty queue, contains the currently
	 * active mappings.  The pre_cache tracks blocks that are being hit
	 * frequently and are potential candidates for promotion to the
	 * cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * book keeping effects.  eg, decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	unsigned discard_promote_adjustment;
	unsigned read_promote_adjustment;
	unsigned write_promote_adjustment;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128

/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			/* Move the hit entry to the front of its bucket. */
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}

static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
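
/*
 * e.g. an entry with a hit count of 9 sits in level ilog2(9) = 3; anything
 * with a hit count of 2^15 or more is capped at the top level (15).
 * queue_peek()/queue_pop() scan from level 0 upwards, so low-hit entries
 * are evicted first.
 */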

static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	if (in_cache(mq, e))
		queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
	else
		queue_remove(&mq->pre_cache, &e->list);

	hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop_old(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

static struct entry *peek(struct queue *q)
{
	struct list_head *h = queue_peek(q);
	return h ? container_of(h, struct entry, list) : NULL;
}

/*
 * The promotion threshold is adjusted every generation.  As are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}
	}
}

/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue(struct mq_policy *mq, struct entry *e)
{
	check_generation(mq);
	del(mq, e);
	push(mq, e);
}

/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq,
			 struct policy_locker *locker, dm_oblock_t *oblock)
{
	struct entry *demoted = peek(&mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	if (locker->fn(locker, demoted->oblock))
		/*
		 * We couldn't lock the demoted block.
		 */
		return -EBUSY;

	del(mq, demoted);
	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * Stops blocks flickering in and out of the cache.
	 */

	return 0;
}

/*
 * Entries in the pre_cache whose hit count passes the promotion
 * threshold move to the cache proper.  Working out the correct
 * value for the promotion_threshold is crucial to this policy.
 */
static unsigned promote_threshold(struct mq_policy *mq)
{
	struct entry *e;

	if (any_free_cblocks(mq))
		return 0;

	e = peek(&mq->cache_clean);
	if (e)
		return e->hit_count;

	e = peek(&mq->cache_dirty);
	if (e)
		return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

	/* This should never happen */
	return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return promote_threshold(mq) + mq->read_promote_adjustment;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return mq->discard_promote_adjustment;
	}

	return promote_threshold(mq) + mq->write_promote_adjustment;
}
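
/*
 * For example, with the default adjustments and a full cache whose coldest
 * clean entry has a hit count of 10, a read is promoted once its pre_cache
 * entry reaches 10 + 4 = 14 hits, a write needs 10 + 8 = 18, and a write to
 * a discarded block needs just 1 hit (provided a free or clean cblock is
 * available).
 */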

static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_locker *locker,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, locker, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}

	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_locker *locker,
				 struct policy_result *result)
{
	int r = 0;

	if (!should_promote(mq, e, discarded_oblock, data_dir)) {
		requeue(mq, e);
		result->op = POLICY_MISS;

	} else if (!can_migrate)
		r = -EWOULDBLOCK;

	else {
		requeue(mq, e);
		r = pre_cache_to_cache(mq, e, locker, result);
	}

	return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_locker *locker,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, locker, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_locker *locker,
			  struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, locker, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}

/*
 * Looks the oblock up in the hash table, then decides whether to put it in
 * the pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_locker *locker,
	       struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
		 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, locker, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, locker, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	vfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}

static void update_pre_cache_hits(struct list_head *h, void *context)
{
	struct entry *e = container_of(h, struct entry, list);
	e->hit_count++;
}

static void update_cache_hits(struct list_head *h, void *context)
{
	struct mq_policy *mq = context;
	struct entry *e = container_of(h, struct entry, list);
	e->hit_count++;
	mq->hit_count++;
}
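
/*
 * Called at the start of map() with the mutex held.  If the externally
 * supplied tick has advanced, entries pushed since the tick sentinels were
 * last moved get a hit-count bump; the tick sentinels (and the writeback
 * sentinels on the dirty queue) are then pushed to the back again.
 */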

static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags, tick;

	spin_lock_irqsave(&mq->tick_lock, flags);
	tick = mq->tick_protected;
	if (tick != mq->tick) {
		queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
		queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
		queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
		mq->tick = tick;
	}

	queue_tick(&mq->pre_cache);
	queue_tick(&mq->cache_dirty);
	queue_tick(&mq->cache_clean);
	queue_update_writeback_sentinels(&mq->cache_dirty);
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_locker *locker,
		  struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), locker, result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}

static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}

static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	push(mq, e);

	return 0;
}

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct list_head *h;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level) {
			if (is_sentinel(q, h))
				continue;

			e = container_of(h, struct entry, list);
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	struct entry *e = epool_find(&mq->cache_pool, cblock);

	if (!e)
		return -ENODATA;

	del(mq, e);
	free_entry(&mq->cache_pool, e);

	return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

#define CLEAN_TARGET_PERCENTAGE 25

static bool clean_target_met(struct mq_policy *mq)
{
	/*
	 * Cache entries may not be populated.  So we cannot rely on the
	 * size of the clean queue.
	 */
	unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
	unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;

	return nr_clean >= target;
}
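
/*
 * For example, with a 1000 block cache the target is 250 clean blocks; if
 * 800 entries are sitting on the dirty queue only 200 blocks are clean, so
 * __mq_writeback_work() below keeps writing back even when there are no
 * "old" dirty entries in front of a writeback sentinel.
 */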

static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop_old(mq, &mq->cache_dirty);

	if (!e && !clean_target_met(mq))
		e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock, bool critical_only)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p, bool can_block)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);

	if (can_block) {
		mutex_lock(&mq->lock);
		copy_tick(mq);
		mutex_unlock(&mq->lock);
	}
}

static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold")) {
		mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

	} else if (!strcasecmp(key, "sequential_threshold")) {
		mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

	} else if (!strcasecmp(key, "discard_promote_adjustment"))
		mq->discard_promote_adjustment = tmp;

	else if (!strcasecmp(key, "read_promote_adjustment"))
		mq->read_promote_adjustment = tmp;

	else if (!strcasecmp(key, "write_promote_adjustment"))
		mq->write_promote_adjustment = tmp;

	else
		return -EINVAL;

	return 0;
}
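
/*
 * These keys arrive through the cache target's message interface,
 * typically something like (assuming the usual dmsetup syntax):
 *
 *   dmsetup message <cache-dev> 0 sequential_threshold 1024
 *
 * mq_emit_config_values() below reports the current values back on the
 * status line.
 */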

static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("10 random_threshold %u "
	       "sequential_threshold %u "
	       "discard_promote_adjustment %u "
	       "read_promote_adjustment %u "
	       "write_promote_adjustment %u ",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL],
	       mq->discard_promote_adjustment,
	       mq->read_promote_adjustment,
	       mq->write_promote_adjustment);

	*sz_ptr = sz;
	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.remove_cblock = mq_remove_cblock;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
	mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
	mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = __ffs(mq->nr_buckets);
	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;

bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);
	return NULL;
}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 4, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		return -ENOMEM;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(mq_entry_cache);
		return -ENOMEM;
	}

	return 0;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");