/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
bool list_lru_add ( struct list_lru * lru , struct list_head * item )
{
2013-08-28 04:18:00 +04:00
int nid = page_to_nid ( virt_to_page ( item ) ) ;
struct list_lru_node * nlru = & lru - > node [ nid ] ;
spin_lock ( & nlru - > lock ) ;
WARN_ON_ONCE ( nlru - > nr_items < 0 ) ;
2013-08-28 04:17:58 +04:00
if ( list_empty ( item ) ) {
2013-08-28 04:18:00 +04:00
list_add_tail ( item , & nlru - > list ) ;
if ( nlru - > nr_items + + = = 0 )
node_set ( nid , lru - > active_nodes ) ;
spin_unlock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
return true ;
}
2013-08-28 04:18:00 +04:00
spin_unlock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
return false ;
}
EXPORT_SYMBOL_GPL ( list_lru_add ) ;
bool list_lru_del ( struct list_lru * lru , struct list_head * item )
{
2013-08-28 04:18:00 +04:00
int nid = page_to_nid ( virt_to_page ( item ) ) ;
struct list_lru_node * nlru = & lru - > node [ nid ] ;
spin_lock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
if ( ! list_empty ( item ) ) {
list_del_init ( item ) ;
2013-08-28 04:18:00 +04:00
if ( - - nlru - > nr_items = = 0 )
node_clear ( nid , lru - > active_nodes ) ;
WARN_ON_ONCE ( nlru - > nr_items < 0 ) ;
spin_unlock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
return true ;
}
2013-08-28 04:18:00 +04:00
spin_unlock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
return false ;
}
EXPORT_SYMBOL_GPL ( list_lru_del ) ;
2013-08-28 04:18:00 +04:00
unsigned long list_lru_count ( struct list_lru * lru )
2013-08-28 04:17:58 +04:00
{
2013-08-28 04:18:00 +04:00
unsigned long count = 0 ;
int nid ;
for_each_node_mask ( nid , lru - > active_nodes ) {
struct list_lru_node * nlru = & lru - > node [ nid ] ;
spin_lock ( & nlru - > lock ) ;
WARN_ON_ONCE ( nlru - > nr_items < 0 ) ;
count + = nlru - > nr_items ;
spin_unlock ( & nlru - > lock ) ;
}
return count ;
}
EXPORT_SYMBOL_GPL ( list_lru_count ) ;
/*
 * list_lru_walk_node - walk one node's LRU list, applying @isolate to each item
 *
 * @isolate is called with nlru->lock held and may drop it (the lock is
 * passed through); it reports via the returned lru_status how the walker
 * should account for the item.  *nr_to_walk is the remaining walk budget,
 * decremented per item and shared across nodes by list_lru_walk().
 *
 * Returns the number of items isolated (LRU_REMOVED) on this node.
 *
 * Fixes vs. previous version:
 *  - dropped bogus 'static': the symbol is EXPORT_SYMBOL_GPL'd and called
 *    through the public list_lru API, so it needs external linkage;
 *  - the budget check no longer post-decrements on the terminating
 *    comparison, which wrapped *nr_to_walk to ULONG_MAX and defeated the
 *    caller's exhaustion check.
 */
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;
	/*
	 * If we don't keep state of at which pass we are, we can loop at
	 * LRU_RETRY, since we have no guarantees that the caller will be able
	 * to do something other than retry on the next pass. We handle this by
	 * allowing at most one retry per object. This should not be altered
	 * by any condition other than LRU_RETRY.
	 */
	bool first_pass = true;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED:
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/* one retry per object: second LRU_RETRY moves on */
			if (!first_pass) {
				first_pass = true;
				break;
			}
			first_pass = false;
			goto restart;
		default:
			BUG();
		}

		/*
		 * Check before decrementing so an exhausted budget cannot
		 * wrap the unsigned counter below zero.
		 */
		if (*nr_to_walk == 0)
			break;
		(*nr_to_walk)--;
	}
	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
/*
 * list_lru_walk - walk every active node's LRU list with a shared budget
 *
 * @nr_to_walk bounds the total number of items visited across all nodes;
 * it is passed by reference to list_lru_walk_node() which decrements it.
 *
 * Returns the total number of items isolated.
 *
 * Fix: nr_to_walk is unsigned long, so the old '<= 0' exhaustion test
 * could only ever mean '== 0' (and warned under -Wtype-limits); spell it
 * as the equality it is.
 */
unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
			    void *cb_arg, unsigned long nr_to_walk)
{
	unsigned long isolated = 0;
	int nid;

	for_each_node_mask(nid, lru->active_nodes) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk == 0)
			break;
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk);
2013-08-28 04:18:00 +04:00
static unsigned long list_lru_dispose_all_node ( struct list_lru * lru , int nid ,
list_lru_dispose_cb dispose )
2013-08-28 04:17:58 +04:00
{
2013-08-28 04:18:00 +04:00
struct list_lru_node * nlru = & lru - > node [ nid ] ;
2013-08-28 04:17:58 +04:00
LIST_HEAD ( dispose_list ) ;
2013-08-28 04:18:00 +04:00
unsigned long disposed = 0 ;
2013-08-28 04:17:58 +04:00
2013-08-28 04:18:00 +04:00
spin_lock ( & nlru - > lock ) ;
while ( ! list_empty ( & nlru - > list ) ) {
list_splice_init ( & nlru - > list , & dispose_list ) ;
disposed + = nlru - > nr_items ;
nlru - > nr_items = 0 ;
node_clear ( nid , lru - > active_nodes ) ;
spin_unlock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
dispose ( & dispose_list ) ;
2013-08-28 04:18:00 +04:00
spin_lock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
}
2013-08-28 04:18:00 +04:00
spin_unlock ( & nlru - > lock ) ;
2013-08-28 04:17:58 +04:00
return disposed ;
}
2013-08-28 04:18:00 +04:00
unsigned long list_lru_dispose_all ( struct list_lru * lru ,
list_lru_dispose_cb dispose )
{
unsigned long disposed ;
unsigned long total = 0 ;
int nid ;
do {
disposed = 0 ;
for_each_node_mask ( nid , lru - > active_nodes ) {
disposed + = list_lru_dispose_all_node ( lru , nid ,
dispose ) ;
}
total + = disposed ;
} while ( disposed ! = 0 ) ;
return total ;
}
2013-08-28 04:17:58 +04:00
int list_lru_init ( struct list_lru * lru )
{
2013-08-28 04:18:00 +04:00
int i ;
2013-08-28 04:17:58 +04:00
2013-08-28 04:18:00 +04:00
nodes_clear ( lru - > active_nodes ) ;
for ( i = 0 ; i < MAX_NUMNODES ; i + + ) {
spin_lock_init ( & lru - > node [ i ] . lock ) ;
INIT_LIST_HEAD ( & lru - > node [ i ] . list ) ;
lru - > node [ i ] . nr_items = 0 ;
}
2013-08-28 04:17:58 +04:00
return 0 ;
}
EXPORT_SYMBOL_GPL ( list_lru_init ) ;