/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */
#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG 1
#else
#define SLABDEBUG 0
#endif
/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)
/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
	DOWN,		/* No slab functionality available */
	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
	UP,		/* Everything works but does not show up in sysfs */
	SYSFS		/* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);
/*
 * Tracking user of a slab.
 */
struct track {
	unsigned long addr;	/* Called from address */
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };
#ifdef CONFIG_SLUB_DEBUG
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
	kfree(s);
}

#endif

static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	c->stat[si]++;
#endif
}
/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
	return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
	return s->node[node];
#else
	return &s->local_node;
#endif
}

static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
{
#ifdef CONFIG_SMP
	return s->cpu_slab[cpu];
#else
	return &s->cpu_slab;
#endif
}
/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}
/*
 * Slow version of get and set free pointer.
 *
 * This version requires touching the cache lines of kmem_cache which
 * we avoid doing in the fast alloc/free paths. There we obtain the offset
 * from the page struct.
 */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
			__p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
	for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}
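
/*
 * Illustrative example (hypothetical numbers): for a cache with
 * s->size == 64 and a slab holding 32 objects at address addr,
 * for_each_object() visits addr, addr + 64, ..., addr + 31 * 64, and
 * slab_index(addr + 192, s, addr) maps the fourth of those objects
 * back to index 3.
 */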
static inline struct kmem_cache_order_objects oo_make(int order,
						unsigned long size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
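
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for an
 * order-2 slab of 512-byte objects, oo_make(2, 512) packs
 * (2 << OO_SHIFT) + (4 * 4096) / 512 == 0x20020 into x.x, so that
 * oo_order() recovers 2 and oo_objects() recovers 32. Order and object
 * count therefore travel together in a single word.
 */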
#ifdef CONFIG_SLUB_DEBUG
/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
	int i, offset;
	int newline = 1;
	char ascii[17];

	ascii[16] = 0;

	for (i = 0; i < length; i++) {
		if (newline) {
			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
			newline = 0;
		}
		printk(KERN_CONT " %02x", addr[i]);
		offset = i % 16;
		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
		if (offset == 15) {
			printk(KERN_CONT " %s\n", ascii);
			newline = 1;
		}
	}
	if (!newline) {
		i %= 16;
		while (i < 16) {
			printk(KERN_CONT "   ");
			ascii[i] = ' ';
			i++;
		}
		printk(KERN_CONT " %s\n", ascii);
	}
}
static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	p += alloc;
	if (addr) {
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}
static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
		page, page->objects, page->inuse, page->freelist, page->flags);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "========================================"
			"=====================================\n");
	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
	printk(KERN_ERR "----------------------------------------"
			"-------------------------------------\n\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
			p, p - addr, get_freepointer(s, p));

	if (p > addr + 16)
		print_section("Bytes b4", p - 16, 16);

	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));

	if (s->flags & SLAB_RED_ZONE)
		print_section("Redzone", p + s->objsize,
			s->inuse - s->objsize);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (off != s->size)
		/* Beginning of the filler is the free pointer */
		print_section("Padding", p + off, s->size - off);

	dump_stack();
}
static void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}
static void init_object(struct kmem_cache *s, void *object, int active)
{
	u8 *p = object;

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->objsize - 1);
		p[s->objsize - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->objsize,
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
			s->inuse - s->objsize);
}
static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
	while (bytes) {
		if (*start != (u8)value)
			return start;
		start++;
		bytes--;
	}
	return NULL;
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	fault = check_bytes(start, value, bytes);
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}
/*
 * Object layout:
 *
 * object address
 *	Bytes of the object to be managed.
 *	If the freepointer may overlay the object then the free
 *	pointer is the first word of the object.
 *
 *	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *	0xa5 (POISON_END)
 *
 * object + s->objsize
 *	Padding to reach word boundary. This is also used for Redzoning.
 *	Padding is extended by another word if Redzoning is enabled and
 *	objsize == inuse.
 *
 *	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *	Meta data starts here.
 *
 *	A. Free pointer (if we cannot overwrite object on free)
 *	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 *		one word if debugging is on to be able to detect writes
 *		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
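
/*
 * Illustrative layout (hypothetical numbers: 64-bit, objsize == 24,
 * SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER):
 *
 *	object + 0  .. 23	payload, poisoned with POISON_FREE/POISON_END
 *				while the object is free
 *	object + 24 .. 31	red zone word up to s->inuse
 *	object + 32 .. 39	free pointer (s->offset != 0 because the
 *				poisoned object may not be overwritten)
 *	object + 40 .. 		two struct track records for SLAB_STORE_USER
 *	... up to s->size	filler bytes set to POISON_INUSE
 */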
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}
/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page));
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
	return 0;
}
static int check_object(struct kmem_cache *s, struct page *page,
					void *object, int active)
{
	u8 *p = object;
	u8 *endobject = object + s->objsize;

	if (s->flags & SLAB_RED_ZONE) {
		unsigned int red =
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, red, s->inuse - s->objsize))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->objsize);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (!active && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->objsize - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->objsize - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && active)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}
static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}
/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp = page->freelist;
	void *object = NULL;
	unsigned long max_objects;

	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but "
			"should be %d", page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"counted were %d", page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}
static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}
/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	list_add(&page->lru, &n->full);
	spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	spin_unlock(&n->list_lock);
}
/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (!NUMA_BUILD || n) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}
/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, 0);
	init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		goto bad;

	if (!on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already allocated");
		goto bad;
	}

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		goto bad;
	}

	if (!check_object(s, page, object, 0))
		goto bad;

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, 1);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}
static int free_debug_processing(struct kmem_cache *s, struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		goto fail;

	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		goto fail;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		goto fail;
	}

	if (!check_object(s, page, object, 1))
		return 0;

	if (unlikely(s != page->slab)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) "
				"outside of slab", object);
		} else if (!page->slab) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
						object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		goto fail;
	}

	/* Special debug activities for freeing objects */
	if (!PageSlubFrozen(page) && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, 0);
	return 1;

fail:
	slab_fix(s, "Object at 0x%p not freed", object);
	return 0;
}
static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_DEBUG_FREE;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		default:
			printk(KERN_ERR "slub_debug option '%c' "
				"unknown. skipped\n", *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);
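
/*
 * Usage sketch for the boot parameter parsed above (forms illustrative):
 *
 *	slub_debug		enable all DEBUG_DEFAULT_FLAGS for every cache
 *	slub_debug=fz		enable only sanity checks (f) and red zoning (z)
 *	slub_debug=,dentry	full debugging, but only for caches whose name
 *				starts with "dentry"
 *	slub_debug=-		switch all debugging off
 *
 * The single-character options map to SLAB_DEBUG_FREE (f), SLAB_RED_ZONE (z),
 * SLAB_POISON (p), SLAB_STORE_USER (u) and SLAB_TRACE (t) as handled in the
 * switch statement of setup_slub_debug().
 */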
static unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs ||
	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
		flags |= slub_debug;

	return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
#endif
/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(gfp_t flags, int node,
					struct kmem_cache_order_objects oo)
{
	int order = oo_order(oo);

	if (node == -1)
		return alloc_pages(flags, order);
	else
		return alloc_pages_node(node, flags, order);
}
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;

	flags |= s->allocflags;

	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
									oo);
	if (unlikely(!page)) {
		oo = s->min;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(flags, node, oo);
		if (!page)
			return NULL;

		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
	}
	page->objects = oo_objects(oo);
	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << oo_order(oo));

	return page;
}
static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	if (unlikely(s->ctor))
		s->ctor(object);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	void *start;
	void *last;
	void *p;

	BUG_ON(flags & GFP_SLAB_BUG_MASK);

	page = allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
	if (!page)
		goto out;

	inc_slabs_node(s, page_to_nid(page), page->objects);
	page->slab = s;
	page->flags |= 1 << PG_slab;
	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
			SLAB_STORE_USER | SLAB_TRACE))
		__SetPageSlubDebug(page);

	start = page_address(page);

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));

	last = start;
	for_each_object(p, s, start, page->objects) {
		setup_object(s, page, last);
		set_freepointer(s, last, p);
		last = p;
	}
	setup_object(s, page, last);
	set_freepointer(s, last, NULL);

	page->freelist = start;
	page->inuse = 0;
out:
	return page;
}
static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int order = compound_order(page);
	int pages = 1 << order;

	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page),
						page->objects)
			check_object(s, page, p, 0);
		__ClearPageSlubDebug(page);
	}

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-pages);

	__ClearPageSlab(page);
	reset_page_mapcount(page);
	__free_pages(page, order);
}

static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page;

	page = container_of((struct list_head *)h, struct page, lru);
	__free_slab(page->slab, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
		/*
		 * RCU free overloads the RCU head over the LRU
		 */
		struct rcu_head *head = (void *)&page->lru;

		call_rcu(head, rcu_free_slab);
	} else
		__free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
	dec_slabs_node(s, page_to_nid(page), page->objects);
	free_slab(s, page);
}
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	__bit_spin_unlock(PG_locked, &page->flags);
}

static __always_inline int slab_trylock(struct page *page)
{
	int rc = 1;

	rc = bit_spin_trylock(PG_locked, &page->flags);
	return rc;
}
/*
 * Management of partially allocated slabs
 */
static void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	if (tail)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}

static void remove_partial(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	n->nr_partial--;
	spin_unlock(&n->list_lock);
}
/*
 * Lock slab and remove from the partial list.
 *
 * Must hold list_lock.
 */
static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
							struct page *page)
{
	if (slab_trylock(page)) {
		list_del(&page->lru);
		n->nr_partial--;
		__SetPageSlubFrozen(page);
		return 1;
	}
	return 0;
}
/*
 * Try to allocate a partial slab from a specific node.
 */
static struct page *get_partial_node(struct kmem_cache_node *n)
{
	struct page *page;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partials()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry(page, &n->partial, lru)
		if (lock_and_freeze_slab(n, page))
			goto out;
	page = NULL;
out:
	spin_unlock(&n->list_lock);
	return page;
}
/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
	struct page *page;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
	 * defrag_ratio = 1000) then every (well almost) allocation will
	 * first attempt to defrag slab caches on other nodes. This means
	 * scanning over all nodes to look for partial slabs which may be
	 * expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return NULL;

	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		struct kmem_cache_node *n;

		n = get_node(s, zone_to_nid(zone));

		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
				n->nr_partial > n->min_partial) {
			page = get_partial_node(n);
			if (page)
				return page;
		}
	}
#endif
	return NULL;
}
/*
 * Get a partial page, lock it and return it.
 */
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	int searchnode = (node == -1) ? numa_node_id() : node;

	page = get_partial_node(get_node(s, searchnode));
	if (page || (flags & __GFP_THISNODE))
		return page;

	return get_any_partial(s, flags);
}
/*
 * Move a page back to the lists.
 *
 * Must be called with the slab lock held.
 *
 * On exit the slab lock will have been dropped.
 */
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

	__ClearPageSlubFrozen(page);
	if (page->inuse) {

		if (page->freelist) {
			add_partial(n, page, tail);
			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
		} else {
			stat(c, DEACTIVATE_FULL);
			if (SLABDEBUG && PageSlubDebug(page) &&
						(s->flags & SLAB_STORE_USER))
				add_full(n, page);
		}
		slab_unlock(page);
	} else {
		stat(c, DEACTIVATE_EMPTY);
		if (n->nr_partial < n->min_partial) {
			/*
			 * Adding an empty slab to the partial slabs in order
			 * to avoid page allocator overhead. This slab needs
			 * to come after the other slabs with objects in
			 * so that the others get filled first. That way the
			 * size of the partial list stays small.
			 *
			 * kmem_cache_shrink can reclaim any empty slabs from
			 * the partial list.
			 */
			add_partial(n, page, 1);
			slab_unlock(page);
		} else {
			slab_unlock(page);
			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
			discard_slab(s, page);
		}
	}
}
/*
 * Remove the cpu slab
 */
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	struct page *page = c->page;
	int tail = 1;

	if (page->freelist)
		stat(c, DEACTIVATE_REMOTE_FREES);
	/*
	 * Merge cpu freelist into slab freelist. Typically we get here
	 * because both freelists are empty. So this is unlikely
	 * to occur.
	 */
	while (unlikely(c->freelist)) {
		void **object;

		tail = 0;	/* Hot objects. Put the slab first */

		/* Retrieve object from cpu_freelist */
		object = c->freelist;
		c->freelist = c->freelist[c->offset];

		/* And put onto the regular freelist */
		object[c->offset] = page->freelist;
		page->freelist = object;
		page->inuse--;
	}
	c->page = NULL;
	unfreeze_slab(s, page, tail);
}
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	stat(c, CPUSLAB_FLUSH);
	slab_lock(c->page);
	deactivate_slab(s, c);
}

/*
 * Flush cpu slab.
 *
 * Called from IPI handler with interrupts disabled.
 */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

	if (likely(c && c->page))
		flush_slab(s, c);
}

static void flush_cpu_slab(void *d)
{
	struct kmem_cache *s = d;

	__flush_cpu_slab(s, smp_processor_id());
}

static void flush_all(struct kmem_cache *s)
{
	on_each_cpu(flush_cpu_slab, s, 1);
}

/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
	if (node != -1 && c->node != node)
		return 0;
#endif
	return 1;
}
/*
 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
 *
 * Interrupts are disabled.
 *
 * Processing is still very fast if new objects have been freed to the
 * regular freelist. In that case we simply take over the regular freelist
 * as the lockless freelist and zap the regular freelist.
 *
 * If that is not working then we fall back to the partial lists. We take the
 * first element of the freelist as the object to allocate now and move the
 * rest of the freelist to the lockless freelist.
 *
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since it involves
 * a call to the page allocator and the setup of a new slab.
 */
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
{
	void **object;
	struct page *new;

	/* We handle __GFP_ZERO in the caller */
	gfpflags &= ~__GFP_ZERO;

	if (!c->page)
		goto new_slab;

	slab_lock(c->page);
	if (unlikely(!node_match(c, node)))
		goto another_slab;

	stat(c, ALLOC_REFILL);

load_freelist:
	object = c->page->freelist;
	if (unlikely(!object))
		goto another_slab;
	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
		goto debug;

	c->freelist = object[c->offset];
	c->page->inuse = c->page->objects;
	c->page->freelist = NULL;
	c->node = page_to_nid(c->page);
unlock_out:
	slab_unlock(c->page);
	stat(c, ALLOC_SLOWPATH);
	return object;

another_slab:
	deactivate_slab(s, c);

new_slab:
	new = get_partial(s, gfpflags, node);
	if (new) {
		c->page = new;
		stat(c, ALLOC_FROM_PARTIAL);
		goto load_freelist;
	}

	if (gfpflags & __GFP_WAIT)
		local_irq_enable();

	new = new_slab(s, gfpflags, node);

	if (gfpflags & __GFP_WAIT)
		local_irq_disable();

	if (new) {
		c = get_cpu_slab(s, smp_processor_id());
		stat(c, ALLOC_SLAB);
		if (c->page)
			flush_slab(s, c);
		slab_lock(new);
		__SetPageSlubFrozen(new);
		c->page = new;
		goto load_freelist;
	}
	return NULL;
debug:
	if (!alloc_debug_processing(s, c->page, object, addr))
		goto another_slab;

	c->page->inuse++;
	c->page->freelist = object[c->offset];
	c->node = -1;
	goto unlock_out;
}
/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr)
{
	void **object;
	struct kmem_cache_cpu *c;
	unsigned long flags;
2008-07-11 00:21:58 +04:00
unsigned int objsize ;
2008-01-08 10:20:30 +03:00
2008-11-19 15:23:59 +03:00
might_sleep_if ( gfpflags & __GFP_WAIT ) ;
2008-12-29 12:47:05 +03:00
2008-12-23 13:37:01 +03:00
if ( should_failslab ( s - > objsize , gfpflags ) )
return NULL ;
2008-01-08 10:20:30 +03:00
2007-05-10 14:15:16 +04:00
local_irq_save ( flags ) ;
2007-10-16 12:26:05 +04:00
c = get_cpu_slab ( s , smp_processor_id ( ) ) ;
2008-07-11 00:21:58 +04:00
objsize = c - > objsize ;
2008-03-02 00:40:44 +03:00
if ( unlikely ( ! c - > freelist | | ! node_match ( c , node ) ) )
2007-05-10 14:15:16 +04:00
2007-10-16 12:26:05 +04:00
object = __slab_alloc ( s , gfpflags , node , addr , c ) ;
2007-05-10 14:15:16 +04:00
else {
2007-10-16 12:26:05 +04:00
object = c - > freelist ;
2007-10-16 12:26:06 +04:00
c - > freelist = object [ c - > offset ] ;
2008-02-08 04:47:41 +03:00
stat ( c , ALLOC_FASTPATH ) ;
2007-05-10 14:15:16 +04:00
}
local_irq_restore ( flags ) ;
2007-07-17 15:03:23 +04:00
if ( unlikely ( ( gfpflags & __GFP_ZERO ) & & object ) )
2008-07-11 00:21:58 +04:00
memset ( object , 0 , objsize ) ;
2007-07-17 15:03:23 +04:00
2007-05-10 14:15:16 +04:00
return object ;
2007-05-07 01:49:36 +04:00
}
void * kmem_cache_alloc ( struct kmem_cache * s , gfp_t gfpflags )
{
2008-08-19 21:43:25 +04:00
return slab_alloc ( s , gfpflags , - 1 , _RET_IP_ ) ;
2007-05-07 01:49:36 +04:00
}
EXPORT_SYMBOL ( kmem_cache_alloc ) ;
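/*
 * Illustrative usage sketch (not part of SLUB itself): how a typical caller
 * exercises the allocation fast path above.  The struct, cache name and
 * helpers are hypothetical and error handling is kept minimal.
 */
#if 0	/* example only, not compiled */
struct foo {
	int a;
	int b;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
					0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void foo_cache_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	if (!f)
		return;
	f->a = 1;
	/* Freeing right after allocating typically hits the cpu slab again. */
	kmem_cache_free(foo_cache, f);
}

static void foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cache);
}
#endif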
# ifdef CONFIG_NUMA
void * kmem_cache_alloc_node ( struct kmem_cache * s , gfp_t gfpflags , int node )
{
2008-08-19 21:43:25 +04:00
return slab_alloc ( s , gfpflags , node , _RET_IP_ ) ;
2007-05-07 01:49:36 +04:00
}
EXPORT_SYMBOL ( kmem_cache_alloc_node ) ;
# endif
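/*
 * Illustrative NUMA sketch (not part of SLUB): allocating from a specific
 * node versus letting the allocator pick one.  The cache pointer and the
 * helper are hypothetical.
 */
#if 0	/* example only, not compiled */
static void *alloc_on_node(struct kmem_cache *cachep, int nid)
{
	/* nid == -1, as passed by kmem_cache_alloc() above, means "any node". */
	if (nid < 0)
		return kmem_cache_alloc(cachep, GFP_KERNEL);
	return kmem_cache_alloc_node(cachep, GFP_KERNEL, nid);
}
#endif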
/*
2007-05-10 14:15:16 +04:00
* Slow path handling. This may still be called frequently since objects
* have a longer lifetime than the cpu slabs in most processing loads .
2007-05-07 01:49:36 +04:00
*
2007-05-10 14:15:16 +04:00
* So we still attempt to reduce cache line usage . Just take the slab
* lock and free the item . If there is no additional partial page
* handling required then we can return immediately .
2007-05-07 01:49:36 +04:00
*/
2007-05-10 14:15:16 +04:00
static void __slab_free ( struct kmem_cache * s , struct page * page ,
2008-08-19 21:43:25 +04:00
void * x , unsigned long addr , unsigned int offset )
2007-05-07 01:49:36 +04:00
{
void * prior ;
void * * object = ( void * ) x ;
2008-02-08 04:47:41 +03:00
struct kmem_cache_cpu * c ;
2007-05-07 01:49:36 +04:00
2008-02-08 04:47:41 +03:00
c = get_cpu_slab ( s , raw_smp_processor_id ( ) ) ;
stat ( c , FREE_SLOWPATH ) ;
2007-05-07 01:49:36 +04:00
slab_lock ( page ) ;
2008-07-24 08:27:18 +04:00
if ( unlikely ( SLABDEBUG & & PageSlubDebug ( page ) ) )
2007-05-07 01:49:36 +04:00
goto debug ;
2008-02-16 10:45:26 +03:00
2007-05-07 01:49:36 +04:00
checks_ok :
2007-10-16 12:26:06 +04:00
prior = object [ offset ] = page - > freelist ;
2007-05-07 01:49:36 +04:00
page - > freelist = object ;
page - > inuse - - ;
2008-07-24 08:27:18 +04:00
if ( unlikely ( PageSlubFrozen ( page ) ) ) {
2008-02-08 04:47:41 +03:00
stat ( c , FREE_FROZEN ) ;
2007-05-07 01:49:36 +04:00
goto out_unlock ;
2008-02-08 04:47:41 +03:00
}
2007-05-07 01:49:36 +04:00
if ( unlikely ( ! page - > inuse ) )
goto slab_empty ;
/*
2008-02-16 10:45:26 +03:00
* Objects left in the slab . If it was not on the partial list before
2007-05-07 01:49:36 +04:00
* then add it .
*/
2008-03-02 00:40:44 +03:00
if ( unlikely ( ! prior ) ) {
2008-01-08 10:20:27 +03:00
add_partial ( get_node ( s , page_to_nid ( page ) ) , page , 1 ) ;
2008-02-08 04:47:41 +03:00
stat ( c , FREE_ADD_PARTIAL ) ;
}
2007-05-07 01:49:36 +04:00
out_unlock :
slab_unlock ( page ) ;
return ;
slab_empty :
2008-03-02 00:40:44 +03:00
if ( prior ) {
2007-05-07 01:49:36 +04:00
/*
2007-05-09 13:32:39 +04:00
* Slab still on the partial list .
2007-05-07 01:49:36 +04:00
*/
remove_partial ( s , page ) ;
2008-02-08 04:47:41 +03:00
stat ( c , FREE_REMOVE_PARTIAL ) ;
}
2007-05-07 01:49:36 +04:00
slab_unlock ( page ) ;
2008-02-08 04:47:41 +03:00
stat ( c , FREE_SLAB ) ;
2007-05-07 01:49:36 +04:00
discard_slab ( s , page ) ;
return ;
debug :
2007-05-17 09:11:00 +04:00
if ( ! free_debug_processing ( s , page , x , addr ) )
2007-05-07 01:49:42 +04:00
goto out_unlock ;
goto checks_ok ;
2007-05-07 01:49:36 +04:00
}
2007-05-10 14:15:16 +04:00
/*
* Fastpath with forced inlining to produce a kfree and kmem_cache_free that
* can perform fastpath freeing without additional function calls .
*
* The fastpath is only possible if we are freeing to the current cpu slab
* of this processor. This is typically the case if we have just allocated
* the item before .
*
* If fastpath is not possible then fall back to __slab_free where we deal
* with all sorts of special processing .
*/
2008-01-08 10:20:27 +03:00
static __always_inline void slab_free ( struct kmem_cache * s ,
2008-08-19 21:43:25 +04:00
struct page * page , void * x , unsigned long addr )
2007-05-10 14:15:16 +04:00
{
void * * object = ( void * ) x ;
2007-10-16 12:26:05 +04:00
struct kmem_cache_cpu * c ;
2008-01-08 10:20:30 +03:00
unsigned long flags ;
2007-05-10 14:15:16 +04:00
local_irq_save ( flags ) ;
2007-10-16 12:26:05 +04:00
c = get_cpu_slab ( s , smp_processor_id ( ) ) ;
2008-02-16 10:45:25 +03:00
debug_check_no_locks_freed ( object , c - > objsize ) ;
2008-04-30 11:55:01 +04:00
if ( ! ( s - > flags & SLAB_DEBUG_OBJECTS ) )
debug_check_no_obj_freed ( object , s - > objsize ) ;
2007-10-16 12:26:07 +04:00
if ( likely ( page = = c - > page & & c - > node > = 0 ) ) {
2007-10-16 12:26:06 +04:00
object [ c - > offset ] = c - > freelist ;
2007-10-16 12:26:05 +04:00
c - > freelist = object ;
2008-02-08 04:47:41 +03:00
stat ( c , FREE_FASTPATH ) ;
2007-05-10 14:15:16 +04:00
} else
2007-10-16 12:26:06 +04:00
__slab_free ( s , page , x , addr , c - > offset ) ;
2007-05-10 14:15:16 +04:00
local_irq_restore ( flags ) ;
}
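/*
 * Minimal sketch (not SLUB code) of the freelist discipline both fast paths
 * rely on: a free object stores the pointer to the next free object inside
 * itself at a fixed word offset, so push and pop are two pointer moves.
 * "offset" is counted in words, as in struct kmem_cache_cpu.
 */
#if 0	/* example only, not compiled */
static inline void *freelist_pop(void ***head, unsigned int offset)
{
	void **object = *head;

	if (!object)
		return NULL;
	*head = object[offset];		/* advance to the next free object */
	return object;
}

static inline void freelist_push(void ***head, void *x, unsigned int offset)
{
	void **object = x;

	object[offset] = *head;		/* link in front of the current head */
	*head = object;
}
#endif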
2007-05-07 01:49:36 +04:00
void kmem_cache_free ( struct kmem_cache * s , void * x )
{
2007-05-07 01:49:42 +04:00
struct page * page ;
2007-05-07 01:49:36 +04:00
2007-05-07 01:49:41 +04:00
page = virt_to_head_page ( x ) ;
2007-05-07 01:49:36 +04:00
2008-08-19 21:43:25 +04:00
slab_free ( s , page , x , _RET_IP_ ) ;
2007-05-07 01:49:36 +04:00
}
EXPORT_SYMBOL ( kmem_cache_free ) ;
2008-10-28 22:02:26 +03:00
/* Figure out on which slab page the object resides */
2007-05-07 01:49:36 +04:00
static struct page * get_object_page ( const void * x )
{
2007-05-07 01:49:41 +04:00
struct page * page = virt_to_head_page ( x ) ;
2007-05-07 01:49:36 +04:00
if ( ! PageSlab ( page ) )
return NULL ;
return page ;
}
/*
2007-05-09 13:32:39 +04:00
* Object placement in a slab is made very easy because we always start at
* offset 0. If we tune the size of the object to the alignment then we can
* get the required alignment by putting one properly sized object after
* another .
2007-05-07 01:49:36 +04:00
*
* Notice that the allocation order determines the sizes of the per cpu
* caches. Each processor always has one slab available for allocations.
* Increasing the allocation order reduces the number of times that slabs
2007-05-09 13:32:39 +04:00
* must be moved on and off the partial lists and is therefore a factor in
2007-05-07 01:49:36 +04:00
* locking overhead .
*/
/*
* Minimum / Maximum order of slab pages. This influences locking overhead
* and slab fragmentation . A higher order reduces the number of partial slabs
* and increases the number of allocations possible without having to
* take the list_lock .
*/
static int slub_min_order ;
2008-04-14 20:11:41 +04:00
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER ;
2008-04-14 20:11:41 +04:00
static int slub_min_objects ;
2007-05-07 01:49:36 +04:00
/*
* Merge control . If this is set then no merging of slab caches will occur .
2007-05-09 13:32:39 +04:00
* ( Could be removed . This was introduced to pacify the merge skeptics . )
2007-05-07 01:49:36 +04:00
*/
static int slub_nomerge ;
/*
* Calculate the order of allocation given an slab object size .
*
2007-05-09 13:32:39 +04:00
* The order of allocation has significant impact on performance and other
* system components . Generally order 0 allocations should be preferred since
* order 0 does not cause fragmentation in the page allocator . Larger objects
* can be problematic to put into order 0 slabs because there may be too much
2008-04-14 20:13:29 +04:00
* unused space left . We go to a higher order if more than 1 / 16 th of the slab
2007-05-09 13:32:39 +04:00
* would be wasted .
*
* In order to reach satisfactory performance we must ensure that a minimum
* number of objects is in one slab . Otherwise we may generate too much
* activity on the partial lists which requires taking the list_lock . This is
* less a concern for large slabs though which are rarely used .
2007-05-07 01:49:36 +04:00
*
2007-05-09 13:32:39 +04:00
* slub_max_order specifies the order where we begin to stop considering the
* number of objects in a slab as critical . If we reach slub_max_order then
* we try to keep the page order as low as possible . So we accept more waste
* of space in favor of a small page order .
2007-05-07 01:49:36 +04:00
*
2007-05-09 13:32:39 +04:00
* Higher order allocations also allow the placement of more objects in a
* slab and thereby reduce object handling overhead . If the user has
* requested a higher minimum order then we start with that one instead of
* the smallest order which will fit the object .
2007-05-07 01:49:36 +04:00
*/
2007-05-09 13:32:46 +04:00
static inline int slab_order ( int size , int min_objects ,
int max_order , int fract_leftover )
2007-05-07 01:49:36 +04:00
{
int order ;
int rem ;
2007-07-17 15:03:20 +04:00
int min_order = slub_min_order ;
2007-05-07 01:49:36 +04:00
2008-10-22 23:00:38 +04:00
if ( ( PAGE_SIZE < < min_order ) / size > MAX_OBJS_PER_PAGE )
return get_order ( size * MAX_OBJS_PER_PAGE ) - 1 ;
2008-04-14 20:11:30 +04:00
2007-07-17 15:03:20 +04:00
for ( order = max ( min_order ,
2007-05-09 13:32:46 +04:00
fls ( min_objects * size - 1 ) - PAGE_SHIFT ) ;
order < = max_order ; order + + ) {
2007-05-07 01:49:36 +04:00
2007-05-09 13:32:46 +04:00
unsigned long slab_size = PAGE_SIZE < < order ;
2007-05-07 01:49:36 +04:00
2007-05-09 13:32:46 +04:00
if ( slab_size < min_objects * size )
2007-05-07 01:49:36 +04:00
continue ;
rem = slab_size % size ;
2007-05-09 13:32:46 +04:00
if ( rem < = slab_size / fract_leftover )
2007-05-07 01:49:36 +04:00
break ;
}
2007-05-09 13:32:39 +04:00
2007-05-07 01:49:36 +04:00
return order ;
}
2007-05-09 13:32:46 +04:00
static inline int calculate_order ( int size )
{
int order ;
int min_objects ;
int fraction ;
/*
* Attempt to find the best configuration for a slab. This
* works by first attempting to generate a layout with
* the best configuration and backing off gradually.
*
* First we increase the acceptable waste in a slab. Then
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects ;
2008-04-14 20:11:41 +04:00
if ( ! min_objects )
min_objects = 4 * ( fls ( nr_cpu_ids ) + 1 ) ;
2007-05-09 13:32:46 +04:00
while ( min_objects > 1 ) {
2008-04-14 20:13:29 +04:00
fraction = 16 ;
2007-05-09 13:32:46 +04:00
while ( fraction > = 4 ) {
order = slab_order ( size , min_objects ,
slub_max_order , fraction ) ;
if ( order < = slub_max_order )
return order ;
fraction / = 2 ;
}
min_objects / = 2 ;
}
/*
* We were unable to place multiple objects in a slab . Now
* lets see if we can place a single object there .
*/
order = slab_order ( size , 1 , slub_max_order , 1 ) ;
if ( order < = slub_max_order )
return order ;
/*
* Doh this slab cannot be placed using slub_max_order .
*/
order = slab_order ( size , 1 , MAX_ORDER , 1 ) ;
if ( order < = MAX_ORDER )
return order ;
return - ENOSYS ;
}
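/*
 * Worked example (illustrative; assumes 4K pages, slub_max_order == 3 and a
 * computed minimum of 16 objects): for 700-byte objects slab_order() starts
 * at order 2 because fls(16 * 700 - 1) - PAGE_SHIFT == 14 - 12 == 2.  A 16K
 * slab holds 23 objects and wastes 16384 % 700 == 284 bytes, which is below
 * 16384 / 16 == 1024, so order 2 is accepted on the first pass.  The helper
 * below merely restates the acceptance test used above.
 */
#if 0	/* example only, not compiled */
static int order_acceptable(int size, int min_objects, int order, int fract)
{
	unsigned long slab_size = PAGE_SIZE << order;

	if (slab_size < (unsigned long)min_objects * size)
		return 0;				/* too few objects fit */
	return slab_size % size <= slab_size / fract;	/* waste is small enough */
}
#endif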
2007-05-07 01:49:36 +04:00
/*
2007-05-09 13:32:39 +04:00
* Figure out what the alignment of the objects will be .
2007-05-07 01:49:36 +04:00
*/
static unsigned long calculate_alignment ( unsigned long flags ,
unsigned long align , unsigned long size )
{
/*
2008-02-16 10:45:26 +03:00
* If the user wants hardware cache aligned objects then follow that
* suggestion if the object is sufficiently large .
2007-05-07 01:49:36 +04:00
*
2008-02-16 10:45:26 +03:00
* The hardware cache alignment cannot override the specified
* alignment though. If that is greater, then use it.
2007-05-07 01:49:36 +04:00
*/
2008-03-06 01:05:56 +03:00
if ( flags & SLAB_HWCACHE_ALIGN ) {
unsigned long ralign = cache_line_size ( ) ;
while ( size < = ralign / 2 )
ralign / = 2 ;
align = max ( align , ralign ) ;
}
2007-05-07 01:49:36 +04:00
if ( align < ARCH_SLAB_MINALIGN )
2008-03-06 01:05:56 +03:00
align = ARCH_SLAB_MINALIGN ;
2007-05-07 01:49:36 +04:00
return ALIGN ( align , sizeof ( void * ) ) ;
}
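/*
 * Worked example (illustrative; assumes a 64-byte cache line and an 8-byte
 * ARCH_SLAB_MINALIGN): a 20-byte object created with SLAB_HWCACHE_ALIGN
 * halves ralign from 64 to 32 (since 20 <= 32) and then stops (20 > 16), so
 * the object is aligned to 32 bytes and two objects can share a cache line
 * instead of each one padding out a full line.
 */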
2007-10-16 12:26:05 +04:00
static void init_kmem_cache_cpu ( struct kmem_cache * s ,
struct kmem_cache_cpu * c )
{
c - > page = NULL ;
2008-03-02 00:40:44 +03:00
c - > freelist = NULL ;
2007-10-16 12:26:05 +04:00
c - > node = 0 ;
2007-10-16 12:26:09 +04:00
c - > offset = s - > offset / sizeof ( void * ) ;
c - > objsize = s - > objsize ;
2008-04-14 19:50:44 +04:00
# ifdef CONFIG_SLUB_STATS
memset ( c - > stat , 0 , NR_SLUB_STAT_ITEMS * sizeof ( unsigned ) ) ;
# endif
2007-10-16 12:26:05 +04:00
}
2008-08-05 10:28:47 +04:00
static void
init_kmem_cache_node ( struct kmem_cache_node * n , struct kmem_cache * s )
2007-05-07 01:49:36 +04:00
{
n - > nr_partial = 0 ;
2008-08-05 10:28:47 +04:00
/*
* The larger the object size is , the more pages we want on the partial
* list to avoid pounding the page allocator excessively .
*/
n - > min_partial = ilog2 ( s - > size ) ;
if ( n - > min_partial < MIN_PARTIAL )
n - > min_partial = MIN_PARTIAL ;
else if ( n - > min_partial > MAX_PARTIAL )
n - > min_partial = MAX_PARTIAL ;
2007-05-07 01:49:36 +04:00
spin_lock_init ( & n - > list_lock ) ;
INIT_LIST_HEAD ( & n - > partial ) ;
2007-07-17 15:03:32 +04:00
# ifdef CONFIG_SLUB_DEBUG
2008-04-14 19:53:02 +04:00
atomic_long_set ( & n - > nr_slabs , 0 ) ;
2008-09-11 23:25:41 +04:00
atomic_long_set ( & n - > total_objects , 0 ) ;
2007-05-07 01:49:42 +04:00
INIT_LIST_HEAD ( & n - > full ) ;
2007-07-17 15:03:32 +04:00
# endif
2007-05-07 01:49:36 +04:00
}
2007-10-16 12:26:08 +04:00
# ifdef CONFIG_SMP
/*
* Per cpu array for per cpu structures .
*
* The per cpu array places all kmem_cache_cpu structures from one processor
* close together meaning that it becomes possible that multiple per cpu
* structures are contained in one cacheline . This may be particularly
* beneficial for the kmalloc caches .
*
* A desktop system typically has around 60 - 80 slabs . With 100 here we are
* likely able to get per cpu structures for all caches from the array defined
* here . We must be able to cover all kmalloc caches during bootstrap .
*
* If the per cpu array is exhausted then fall back to kmalloc
* of individual cachelines . No sharing is possible then .
*/
# define NR_KMEM_CACHE_CPU 100
static DEFINE_PER_CPU ( struct kmem_cache_cpu ,
kmem_cache_cpu ) [ NR_KMEM_CACHE_CPU ] ;
static DEFINE_PER_CPU ( struct kmem_cache_cpu * , kmem_cache_cpu_free ) ;
2009-01-01 02:42:29 +03:00
static DECLARE_BITMAP ( kmem_cach_cpu_free_init_once , CONFIG_NR_CPUS ) ;
2007-10-16 12:26:08 +04:00
static struct kmem_cache_cpu * alloc_kmem_cache_cpu ( struct kmem_cache * s ,
int cpu , gfp_t flags )
{
struct kmem_cache_cpu * c = per_cpu ( kmem_cache_cpu_free , cpu ) ;
if ( c )
per_cpu ( kmem_cache_cpu_free , cpu ) =
( void * ) c - > freelist ;
else {
/* Table overflow: So allocate ourselves */
c = kmalloc_node (
ALIGN ( sizeof ( struct kmem_cache_cpu ) , cache_line_size ( ) ) ,
flags , cpu_to_node ( cpu ) ) ;
if ( ! c )
return NULL ;
}
init_kmem_cache_cpu ( s , c ) ;
return c ;
}
static void free_kmem_cache_cpu ( struct kmem_cache_cpu * c , int cpu )
{
if ( c < per_cpu ( kmem_cache_cpu , cpu ) | |
2009-01-28 05:59:46 +03:00
c > = per_cpu ( kmem_cache_cpu , cpu ) + NR_KMEM_CACHE_CPU ) {
2007-10-16 12:26:08 +04:00
kfree ( c ) ;
return ;
}
c - > freelist = ( void * ) per_cpu ( kmem_cache_cpu_free , cpu ) ;
per_cpu ( kmem_cache_cpu_free , cpu ) = c ;
}
static void free_kmem_cache_cpus ( struct kmem_cache * s )
{
int cpu ;
for_each_online_cpu ( cpu ) {
struct kmem_cache_cpu * c = get_cpu_slab ( s , cpu ) ;
if ( c ) {
s - > cpu_slab [ cpu ] = NULL ;
free_kmem_cache_cpu ( c , cpu ) ;
}
}
}
static int alloc_kmem_cache_cpus ( struct kmem_cache * s , gfp_t flags )
{
int cpu ;
for_each_online_cpu ( cpu ) {
struct kmem_cache_cpu * c = get_cpu_slab ( s , cpu ) ;
if ( c )
continue ;
c = alloc_kmem_cache_cpu ( s , cpu , flags ) ;
if ( ! c ) {
free_kmem_cache_cpus ( s ) ;
return 0 ;
}
s - > cpu_slab [ cpu ] = c ;
}
return 1 ;
}
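/*
 * Illustrative sketch (not SLUB code) of the pattern used above: a small
 * static pool whose free entries are chained through a field of the entry
 * itself, with a kmalloc() fallback once the pool is exhausted.  The entry
 * type and pool size are hypothetical.
 */
#if 0	/* example only, not compiled */
struct pool_entry {
	void *freelist;			/* doubles as the "next free" link */
	int data;
};

#define EXAMPLE_POOL_SIZE 16
static struct pool_entry example_pool[EXAMPLE_POOL_SIZE];
static struct pool_entry *example_pool_free;

static void example_pool_init(void)
{
	int i;

	for (i = EXAMPLE_POOL_SIZE - 1; i >= 0; i--) {
		example_pool[i].freelist = example_pool_free;
		example_pool_free = &example_pool[i];
	}
}

static struct pool_entry *example_pool_alloc(gfp_t flags)
{
	struct pool_entry *e = example_pool_free;

	if (e) {
		example_pool_free = e->freelist;
		return e;
	}
	/* Pool exhausted: fall back to the general allocator. */
	return kmalloc(sizeof(*e), flags);
}

static void example_pool_release(struct pool_entry *e)
{
	if (e < example_pool || e >= example_pool + EXAMPLE_POOL_SIZE) {
		kfree(e);
		return;
	}
	e->freelist = example_pool_free;
	example_pool_free = e;
}
#endif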
/*
* Initialize the per cpu array .
*/
static void init_alloc_cpu_cpu ( int cpu )
{
int i ;
2009-01-01 02:42:29 +03:00
if ( cpumask_test_cpu ( cpu , to_cpumask ( kmem_cach_cpu_free_init_once ) ) )
2007-10-16 12:26:08 +04:00
return ;
for ( i = NR_KMEM_CACHE_CPU - 1 ; i > = 0 ; i - - )
free_kmem_cache_cpu ( & per_cpu ( kmem_cache_cpu , cpu ) [ i ] , cpu ) ;
2009-01-01 02:42:29 +03:00
cpumask_set_cpu ( cpu , to_cpumask ( kmem_cach_cpu_free_init_once ) ) ;
2007-10-16 12:26:08 +04:00
}
static void __init init_alloc_cpu ( void )
{
int cpu ;
for_each_online_cpu ( cpu )
init_alloc_cpu_cpu ( cpu ) ;
}
# else
static inline void free_kmem_cache_cpus ( struct kmem_cache * s ) { }
static inline void init_alloc_cpu ( void ) { }
static inline int alloc_kmem_cache_cpus ( struct kmem_cache * s , gfp_t flags )
{
init_kmem_cache_cpu ( s , & s - > cpu_slab ) ;
return 1 ;
}
# endif
2007-05-07 01:49:36 +04:00
# ifdef CONFIG_NUMA
/*
* No kmalloc_node yet so do it by hand . We know that this is the first
* slab on the node for this slabcache . There are no concurrent accesses
* possible .
*
* Note that this function only works on the kmalloc_node_cache
2007-10-16 12:26:08 +04:00
* when allocating for the kmalloc_node_cache . This is used for bootstrapping
* memory on a fresh node that has no slab structures yet .
2007-05-07 01:49:36 +04:00
*/
2008-11-26 06:14:19 +03:00
static void early_kmem_cache_node_alloc ( gfp_t gfpflags , int node )
2007-05-07 01:49:36 +04:00
{
struct page * page ;
struct kmem_cache_node * n ;
2008-01-08 10:20:28 +03:00
unsigned long flags ;
2007-05-07 01:49:36 +04:00
BUG_ON ( kmalloc_caches - > size < sizeof ( struct kmem_cache_node ) ) ;
2007-08-23 01:01:57 +04:00
page = new_slab ( kmalloc_caches , gfpflags , node ) ;
2007-05-07 01:49:36 +04:00
BUG_ON ( ! page ) ;
2007-08-23 01:01:57 +04:00
if ( page_to_nid ( page ) ! = node ) {
printk ( KERN_ERR " SLUB: Unable to allocate memory from "
" node %d \n " , node ) ;
printk ( KERN_ERR " SLUB: Allocating a useless per node structure "
" in order to be able to continue \n " ) ;
}
2007-05-07 01:49:36 +04:00
n = page - > freelist ;
BUG_ON ( ! n ) ;
page - > freelist = get_freepointer ( kmalloc_caches , n ) ;
page - > inuse + + ;
kmalloc_caches - > node [ node ] = n ;
2007-07-17 15:03:32 +04:00
# ifdef CONFIG_SLUB_DEBUG
2007-07-17 15:03:21 +04:00
init_object ( kmalloc_caches , n , 1 ) ;
init_tracking ( kmalloc_caches , n ) ;
2007-07-17 15:03:32 +04:00
# endif
2008-08-05 10:28:47 +04:00
init_kmem_cache_node ( n , kmalloc_caches ) ;
2008-04-14 20:11:40 +04:00
inc_slabs_node ( kmalloc_caches , node , page - > objects ) ;
2008-02-16 10:45:26 +03:00
2008-01-08 10:20:28 +03:00
/*
* lockdep requires consistent irq usage for each lock
* so even though there cannot be a race this early in
* the boot sequence , we still disable irqs .
*/
local_irq_save ( flags ) ;
2008-01-08 10:20:27 +03:00
add_partial ( n , page , 0 ) ;
2008-01-08 10:20:28 +03:00
local_irq_restore ( flags ) ;
2007-05-07 01:49:36 +04:00
}
static void free_kmem_cache_nodes ( struct kmem_cache * s )
{
int node ;
2007-10-16 12:25:33 +04:00
for_each_node_state ( node , N_NORMAL_MEMORY ) {
2007-05-07 01:49:36 +04:00
struct kmem_cache_node * n = s - > node [ node ] ;
if ( n & & n ! = & s - > local_node )
kmem_cache_free ( kmalloc_caches , n ) ;
s - > node [ node ] = NULL ;
}
}
static int init_kmem_cache_nodes ( struct kmem_cache * s , gfp_t gfpflags )
{
int node ;
int local_node ;
if ( slab_state > = UP )
local_node = page_to_nid ( virt_to_page ( s ) ) ;
else
local_node = 0 ;
2007-10-16 12:25:33 +04:00
for_each_node_state ( node , N_NORMAL_MEMORY ) {
2007-05-07 01:49:36 +04:00
struct kmem_cache_node * n ;
if ( local_node = = node )
n = & s - > local_node ;
else {
if ( slab_state = = DOWN ) {
2008-11-26 06:14:19 +03:00
early_kmem_cache_node_alloc ( gfpflags , node ) ;
2007-05-07 01:49:36 +04:00
continue ;
}
n = kmem_cache_alloc_node ( kmalloc_caches ,
gfpflags , node ) ;
if ( ! n ) {
free_kmem_cache_nodes ( s ) ;
return 0 ;
}
}
s - > node [ node ] = n ;
2008-08-05 10:28:47 +04:00
init_kmem_cache_node ( n , s ) ;
2007-05-07 01:49:36 +04:00
}
return 1 ;
}
# else
static void free_kmem_cache_nodes ( struct kmem_cache * s )
{
}
static int init_kmem_cache_nodes ( struct kmem_cache * s , gfp_t gfpflags )
{
2008-08-05 10:28:47 +04:00
init_kmem_cache_node ( & s - > local_node , s ) ;
2007-05-07 01:49:36 +04:00
return 1 ;
}
# endif
/*
* calculate_sizes ( ) determines the order and the distribution of data within
* a slab object .
*/
2008-04-14 20:11:41 +04:00
static int calculate_sizes ( struct kmem_cache * s , int forced_order )
2007-05-07 01:49:36 +04:00
{
unsigned long flags = s - > flags ;
unsigned long size = s - > objsize ;
unsigned long align = s - > align ;
2008-04-14 20:11:31 +04:00
int order ;
2007-05-07 01:49:36 +04:00
2008-02-16 10:45:25 +03:00
/*
* Round up object size to the next word boundary . We can only
* place the free pointer at word boundaries and this determines
* the possible location of the free pointer .
*/
size = ALIGN ( size , sizeof ( void * ) ) ;
# ifdef CONFIG_SLUB_DEBUG
2007-05-07 01:49:36 +04:00
/*
* Determine if we can poison the object itself . If the user of
* the slab may touch the object after free or before allocation
* then we should never poison the object itself .
*/
if ( ( flags & SLAB_POISON ) & & ! ( flags & SLAB_DESTROY_BY_RCU ) & &
2007-05-17 09:10:50 +04:00
! s - > ctor )
2007-05-07 01:49:36 +04:00
s - > flags | = __OBJECT_POISON ;
else
s - > flags & = ~ __OBJECT_POISON ;
/*
2007-05-09 13:32:39 +04:00
* If we are Redzoning then check if there is some space between the
2007-05-07 01:49:36 +04:00
* end of the object and the free pointer . If not then add an
2007-05-09 13:32:39 +04:00
* additional word to have some bytes to store Redzone information .
2007-05-07 01:49:36 +04:00
*/
if ( ( flags & SLAB_RED_ZONE ) & & size = = s - > objsize )
size + = sizeof ( void * ) ;
2007-05-09 13:32:44 +04:00
# endif
2007-05-07 01:49:36 +04:00
/*
2007-05-09 13:32:39 +04:00
* With that we have determined the number of bytes in actual use
* by the object . This is the potential offset to the free pointer .
2007-05-07 01:49:36 +04:00
*/
s - > inuse = size ;
if ( ( ( flags & ( SLAB_DESTROY_BY_RCU | SLAB_POISON ) ) | |
2007-05-17 09:10:50 +04:00
s - > ctor ) ) {
2007-05-07 01:49:36 +04:00
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
* kmem_cache_free .
*
* This is the case if we do RCU , have a constructor or
* destructor or are poisoning the objects .
*/
s - > offset = size ;
size + = sizeof ( void * ) ;
}
2007-05-24 00:57:31 +04:00
# ifdef CONFIG_SLUB_DEBUG
2007-05-07 01:49:36 +04:00
if ( flags & SLAB_STORE_USER )
/*
* Need to store information about allocs and frees after
* the object .
*/
size + = 2 * sizeof ( struct track ) ;
2007-05-09 13:32:36 +04:00
if ( flags & SLAB_RED_ZONE )
2007-05-07 01:49:36 +04:00
/*
* Add some empty padding so that we can catch
* overwrites from earlier objects rather than let
* tracking information or the free pointer be
2008-12-30 00:14:56 +03:00
* corrupted if a user writes before the start
2007-05-07 01:49:36 +04:00
* of the object .
*/
size + = sizeof ( void * ) ;
2007-05-09 13:32:44 +04:00
# endif
2007-05-09 13:32:39 +04:00
2007-05-07 01:49:36 +04:00
/*
* Determine the alignment based on various parameters that the
2007-05-09 13:32:35 +04:00
* user specified and the dynamic determination of cache line size
* on bootup .
2007-05-07 01:49:36 +04:00
*/
align = calculate_alignment ( flags , align , s - > objsize ) ;
/*
* SLUB stores one object immediately after another beginning from
* offset 0. In order to align the objects we have to simply size
* each object to conform to the alignment .
*/
size = ALIGN ( size , align ) ;
s - > size = size ;
2008-04-14 20:11:41 +04:00
if ( forced_order > = 0 )
order = forced_order ;
else
order = calculate_order ( size ) ;
2007-05-07 01:49:36 +04:00
2008-04-14 20:11:31 +04:00
if ( order < 0 )
2007-05-07 01:49:36 +04:00
return 0 ;
2008-02-15 01:21:32 +03:00
s - > allocflags = 0 ;
2008-04-14 20:11:31 +04:00
if ( order )
2008-02-15 01:21:32 +03:00
s - > allocflags | = __GFP_COMP ;
if ( s - > flags & SLAB_CACHE_DMA )
s - > allocflags | = SLUB_DMA ;
if ( s - > flags & SLAB_RECLAIM_ACCOUNT )
s - > allocflags | = __GFP_RECLAIMABLE ;
2007-05-07 01:49:36 +04:00
/*
* Determine the number of objects per slab
*/
2008-04-14 20:11:31 +04:00
s - > oo = oo_make ( order , size ) ;
2008-04-14 20:11:40 +04:00
s - > min = oo_make ( get_order ( size ) , size ) ;
2008-04-14 20:11:40 +04:00
if ( oo_objects ( s - > oo ) > oo_objects ( s - > max ) )
s - > max = s - > oo ;
2007-05-07 01:49:36 +04:00
2008-04-14 20:11:31 +04:00
return ! ! oo_objects ( s - > oo ) ;
2007-05-07 01:49:36 +04:00
}
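/*
 * Illustrative layout summary (no new SLUB code): with full debugging
 * enabled, calculate_sizes() ends up with a per-object layout roughly like
 *
 *	| payload (objsize) | red zone word | free pointer | alloc track |
 *	| free track | red-zone padding | alignment padding |
 *
 * where s->inuse ends after the red zone word, s->offset points at the
 * relocated free pointer and s->size is the full stride from one object to
 * the next.  Without debugging, a ctor or RCU, the free pointer simply
 * overlays the first word of the payload and s->offset stays 0.
 */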
static int kmem_cache_open ( struct kmem_cache * s , gfp_t gfpflags ,
const char * name , size_t size ,
size_t align , unsigned long flags ,
2008-07-26 06:45:34 +04:00
void ( * ctor ) ( void * ) )
2007-05-07 01:49:36 +04:00
{
memset ( s , 0 , kmem_size ) ;
s - > name = name ;
s - > ctor = ctor ;
s - > objsize = size ;
s - > align = align ;
2007-09-12 02:24:11 +04:00
s - > flags = kmem_cache_flags ( size , flags , name , ctor ) ;
2007-05-07 01:49:36 +04:00
2008-04-14 20:11:41 +04:00
if ( ! calculate_sizes ( s , - 1 ) )
2007-05-07 01:49:36 +04:00
goto error ;
s - > refcount = 1 ;
# ifdef CONFIG_NUMA
2008-08-19 17:51:22 +04:00
s - > remote_node_defrag_ratio = 1000 ;
2007-05-07 01:49:36 +04:00
# endif
2007-10-16 12:26:05 +04:00
if ( ! init_kmem_cache_nodes ( s , gfpflags & ~ SLUB_DMA ) )
goto error ;
2007-05-07 01:49:36 +04:00
2007-10-16 12:26:05 +04:00
if ( alloc_kmem_cache_cpus ( s , gfpflags & ~ SLUB_DMA ) )
2007-05-07 01:49:36 +04:00
return 1 ;
2007-10-16 12:26:08 +04:00
free_kmem_cache_nodes ( s ) ;
2007-05-07 01:49:36 +04:00
error :
if ( flags & SLAB_PANIC )
panic ( " Cannot create slab %s size=%lu realsize=%u "
" order=%u offset=%u flags=%lx \n " ,
2008-04-14 20:11:31 +04:00
s - > name , ( unsigned long ) size , s - > size , oo_order ( s - > oo ) ,
2007-05-07 01:49:36 +04:00
s - > offset , flags ) ;
return 0 ;
}
/*
* Check if a given pointer is valid
*/
int kmem_ptr_validate ( struct kmem_cache * s , const void * object )
{
2008-01-08 10:20:27 +03:00
struct page * page ;
2007-05-07 01:49:36 +04:00
page = get_object_page ( object ) ;
if ( ! page | | s ! = page - > slab )
/* No slab or wrong slab */
return 0 ;
2007-05-09 13:32:37 +04:00
if ( ! check_valid_pointer ( s , page , object ) )
2007-05-07 01:49:36 +04:00
return 0 ;
/*
* We could also check if the object is on the slab's freelist.
* But this would be too expensive and it seems that the main
2008-02-16 10:45:26 +03:00
* purpose of kmem_ptr_validate() is to check if the object belongs
2007-05-07 01:49:36 +04:00
* to a certain slab .
*/
return 1 ;
}
EXPORT_SYMBOL ( kmem_ptr_validate ) ;
/*
* Determine the size of a slab object
*/
unsigned int kmem_cache_size ( struct kmem_cache * s )
{
return s - > objsize ;
}
EXPORT_SYMBOL ( kmem_cache_size ) ;
const char * kmem_cache_name ( struct kmem_cache * s )
{
return s - > name ;
}
EXPORT_SYMBOL ( kmem_cache_name ) ;
2008-04-25 23:22:43 +04:00
static void list_slab_objects ( struct kmem_cache * s , struct page * page ,
const char * text )
{
# ifdef CONFIG_SLUB_DEBUG
void * addr = page_address ( page ) ;
void * p ;
DECLARE_BITMAP ( map , page - > objects ) ;
bitmap_zero ( map , page - > objects ) ;
slab_err ( s , page , " %s " , text ) ;
slab_lock ( page ) ;
for_each_free_object ( p , s , page - > freelist )
set_bit ( slab_index ( p , s , addr ) , map ) ;
for_each_object ( p , s , addr , page - > objects ) {
if ( ! test_bit ( slab_index ( p , s , addr ) , map ) ) {
printk ( KERN_ERR " INFO: Object 0x%p @offset=%tu \n " ,
p , p - addr ) ;
print_tracking ( s , p ) ;
}
}
slab_unlock ( page ) ;
# endif
}
2007-05-07 01:49:36 +04:00
/*
2008-04-23 23:36:52 +04:00
* Attempt to free all partial slabs on a node .
2007-05-07 01:49:36 +04:00
*/
2008-04-23 23:36:52 +04:00
static void free_partial ( struct kmem_cache * s , struct kmem_cache_node * n )
2007-05-07 01:49:36 +04:00
{
unsigned long flags ;
struct page * page , * h ;
spin_lock_irqsave ( & n - > list_lock , flags ) ;
2008-04-25 23:22:43 +04:00
list_for_each_entry_safe ( page , h , & n - > partial , lru ) {
2007-05-07 01:49:36 +04:00
if ( ! page - > inuse ) {
list_del ( & page - > lru ) ;
discard_slab ( s , page ) ;
2008-04-23 23:36:52 +04:00
n - > nr_partial - - ;
2008-04-25 23:22:43 +04:00
} else {
list_slab_objects ( s , page ,
" Objects remaining on kmem_cache_close() " ) ;
2008-04-23 23:36:52 +04:00
}
2008-04-25 23:22:43 +04:00
}
2007-05-07 01:49:36 +04:00
spin_unlock_irqrestore ( & n - > list_lock , flags ) ;
}
/*
2007-05-09 13:32:39 +04:00
* Release all resources used by a slab cache .
2007-05-07 01:49:36 +04:00
*/
2007-07-17 15:03:24 +04:00
static inline int kmem_cache_close ( struct kmem_cache * s )
2007-05-07 01:49:36 +04:00
{
int node ;
flush_all ( s ) ;
/* Attempt to free all objects */
2007-10-16 12:26:08 +04:00
free_kmem_cache_cpus ( s ) ;
2007-10-16 12:25:33 +04:00
for_each_node_state ( node , N_NORMAL_MEMORY ) {
2007-05-07 01:49:36 +04:00
struct kmem_cache_node * n = get_node ( s , node ) ;
2008-04-23 23:36:52 +04:00
free_partial ( s , n ) ;
if ( n - > nr_partial | | slabs_node ( s , node ) )
2007-05-07 01:49:36 +04:00
return 1 ;
}
free_kmem_cache_nodes ( s ) ;
return 0 ;
}
/*
* Close a cache and release the kmem_cache structure
* ( must be used for caches created using kmem_cache_create )
*/
void kmem_cache_destroy ( struct kmem_cache * s )
{
down_write ( & slub_lock ) ;
s - > refcount - - ;
if ( ! s - > refcount ) {
list_del ( & s - > list ) ;
2007-07-17 15:03:31 +04:00
up_write ( & slub_lock ) ;
2008-04-23 23:31:08 +04:00
if ( kmem_cache_close ( s ) ) {
printk ( KERN_ERR " SLUB %s: %s called for cache that "
" still has objects. \n " , s - > name , __func__ ) ;
dump_stack ( ) ;
}
2007-05-07 01:49:36 +04:00
sysfs_slab_remove ( s ) ;
2007-07-17 15:03:31 +04:00
} else
up_write ( & slub_lock ) ;
2007-05-07 01:49:36 +04:00
}
EXPORT_SYMBOL ( kmem_cache_destroy ) ;
/********************************************************************
 *		Kmalloc subsystem
 *******************************************************************/
2008-02-15 01:28:09 +03:00
struct kmem_cache kmalloc_caches [ PAGE_SHIFT + 1 ] __cacheline_aligned ;
2007-05-07 01:49:36 +04:00
EXPORT_SYMBOL ( kmalloc_caches ) ;
static int __init setup_slub_min_order ( char * str )
{
2008-01-08 10:20:27 +03:00
get_option ( & str , & slub_min_order ) ;
2007-05-07 01:49:36 +04:00
return 1 ;
}
__setup ( " slub_min_order= " , setup_slub_min_order ) ;
static int __init setup_slub_max_order ( char * str )
{
2008-01-08 10:20:27 +03:00
get_option ( & str , & slub_max_order ) ;
2007-05-07 01:49:36 +04:00
return 1 ;
}
__setup ( " slub_max_order= " , setup_slub_max_order ) ;
static int __init setup_slub_min_objects ( char * str )
{
2008-01-08 10:20:27 +03:00
get_option ( & str , & slub_min_objects ) ;
2007-05-07 01:49:36 +04:00
return 1 ;
}
__setup ( " slub_min_objects= " , setup_slub_min_objects ) ;
static int __init setup_slub_nomerge ( char * str )
{
slub_nomerge = 1 ;
return 1 ;
}
__setup ( " slub_nomerge " , setup_slub_nomerge ) ;
static struct kmem_cache * create_kmalloc_cache ( struct kmem_cache * s ,
const char * name , int size , gfp_t gfp_flags )
{
unsigned int flags = 0 ;
if ( gfp_flags & SLUB_DMA )
flags = SLAB_CACHE_DMA ;
down_write ( & slub_lock ) ;
if ( ! kmem_cache_open ( s , gfp_flags , name , size , ARCH_KMALLOC_MINALIGN ,
2008-04-14 20:11:41 +04:00
flags , NULL ) )
2007-05-07 01:49:36 +04:00
goto panic ;
list_add ( & s - > list , & slab_caches ) ;
up_write ( & slub_lock ) ;
if ( sysfs_slab_add ( s ) )
goto panic ;
return s ;
panic :
panic ( " Creation of kmalloc slab %s size=%d failed. \n " , name , size ) ;
}
2007-07-17 15:03:24 +04:00
# ifdef CONFIG_ZONE_DMA
2008-04-14 19:51:18 +04:00
static struct kmem_cache * kmalloc_caches_dma [ PAGE_SHIFT + 1 ] ;
2007-08-08 02:11:48 +04:00
static void sysfs_add_func ( struct work_struct * w )
{
struct kmem_cache * s ;
down_write ( & slub_lock ) ;
list_for_each_entry ( s , & slab_caches , list ) {
if ( s - > flags & __SYSFS_ADD_DEFERRED ) {
s - > flags & = ~ __SYSFS_ADD_DEFERRED ;
sysfs_slab_add ( s ) ;
}
}
up_write ( & slub_lock ) ;
}
static DECLARE_WORK ( sysfs_add_work , sysfs_add_func ) ;
2007-07-17 15:03:24 +04:00
static noinline struct kmem_cache * dma_kmalloc_cache ( int index , gfp_t flags )
{
struct kmem_cache * s ;
char * text ;
size_t realsize ;
s = kmalloc_caches_dma [ index ] ;
if ( s )
return s ;
/* Dynamically create dma cache */
2007-08-08 02:11:48 +04:00
if ( flags & __GFP_WAIT )
down_write ( & slub_lock ) ;
else {
if ( ! down_write_trylock ( & slub_lock ) )
goto out ;
}
if ( kmalloc_caches_dma [ index ] )
goto unlock_out ;
2007-07-17 15:03:24 +04:00
2007-07-17 15:03:27 +04:00
realsize = kmalloc_caches [ index ] . objsize ;
2008-02-06 04:57:39 +03:00
text = kasprintf ( flags & ~ SLUB_DMA , " kmalloc_dma-%d " ,
( unsigned int ) realsize ) ;
2007-08-08 02:11:48 +04:00
s = kmalloc ( kmem_size , flags & ~ SLUB_DMA ) ;
if ( ! s | | ! text | | ! kmem_cache_open ( s , flags , text ,
realsize , ARCH_KMALLOC_MINALIGN ,
SLAB_CACHE_DMA | __SYSFS_ADD_DEFERRED , NULL ) ) {
kfree ( s ) ;
kfree ( text ) ;
goto unlock_out ;
2007-07-17 15:03:25 +04:00
}
2007-08-08 02:11:48 +04:00
list_add ( & s - > list , & slab_caches ) ;
kmalloc_caches_dma [ index ] = s ;
schedule_work ( & sysfs_add_work ) ;
unlock_out :
2007-07-17 15:03:25 +04:00
up_write ( & slub_lock ) ;
2007-08-08 02:11:48 +04:00
out :
2007-07-17 15:03:25 +04:00
return kmalloc_caches_dma [ index ] ;
2007-07-17 15:03:24 +04:00
}
# endif
2007-07-17 15:03:26 +04:00
/*
* Conversion table for small slab sizes / 8 to the index in the
* kmalloc array . This is necessary for slabs < 192 since we have non power
* of two cache sizes there . The size of larger slabs can be determined using
* fls .
*/
static s8 size_index [ 24 ] = {
3 , /* 8 */
4 , /* 16 */
5 , /* 24 */
5 , /* 32 */
6 , /* 40 */
6 , /* 48 */
6 , /* 56 */
6 , /* 64 */
1 , /* 72 */
1 , /* 80 */
1 , /* 88 */
1 , /* 96 */
7 , /* 104 */
7 , /* 112 */
7 , /* 120 */
7 , /* 128 */
2 , /* 136 */
2 , /* 144 */
2 , /* 152 */
2 , /* 160 */
2 , /* 168 */
2 , /* 176 */
2 , /* 184 */
2 /* 192 */
} ;
2007-05-07 01:49:36 +04:00
static struct kmem_cache * get_slab ( size_t size , gfp_t flags )
{
2007-07-17 15:03:26 +04:00
int index ;
2007-05-07 01:49:36 +04:00
2007-07-17 15:03:26 +04:00
if ( size < = 192 ) {
if ( ! size )
return ZERO_SIZE_PTR ;
2007-05-07 01:49:36 +04:00
2007-07-17 15:03:26 +04:00
index = size_index [ ( size - 1 ) / 8 ] ;
2007-10-16 12:24:38 +04:00
} else
2007-07-17 15:03:26 +04:00
index = fls ( size - 1 ) ;
2007-05-07 01:49:36 +04:00
# ifdef CONFIG_ZONE_DMA
2007-07-17 15:03:26 +04:00
if ( unlikely ( ( flags & SLUB_DMA ) ) )
2007-07-17 15:03:24 +04:00
return dma_kmalloc_cache ( index , flags ) ;
2007-07-17 15:03:26 +04:00
2007-05-07 01:49:36 +04:00
# endif
return & kmalloc_caches [ index ] ;
}
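/*
 * Worked example (illustrative): kmalloc(100, GFP_KERNEL) takes the table
 * path above since 100 <= 192: size_index[(100 - 1) / 8] == size_index[12]
 * == 7, so the request is served from the 128-byte kmalloc cache.  A
 * 300-byte request instead uses fls(300 - 1) == 9, i.e. the 512-byte cache.
 */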
void * __kmalloc ( size_t size , gfp_t flags )
{
2007-10-16 12:24:38 +04:00
struct kmem_cache * s ;
2007-05-07 01:49:36 +04:00
2008-02-15 01:28:09 +03:00
if ( unlikely ( size > PAGE_SIZE ) )
2008-02-11 23:47:46 +03:00
return kmalloc_large ( size , flags ) ;
2007-10-16 12:24:38 +04:00
s = get_slab ( size , flags ) ;
if ( unlikely ( ZERO_OR_NULL_PTR ( s ) ) )
2007-07-17 15:03:22 +04:00
return s ;
2008-08-19 21:43:25 +04:00
return slab_alloc ( s , flags , - 1 , _RET_IP_ ) ;
2007-05-07 01:49:36 +04:00
}
EXPORT_SYMBOL ( __kmalloc ) ;
2008-03-02 00:56:40 +03:00
static void * kmalloc_large_node ( size_t size , gfp_t flags , int node )
{
struct page * page = alloc_pages_node ( node , flags | __GFP_COMP ,
get_order ( size ) ) ;
if ( page )
return page_address ( page ) ;
else
return NULL ;
}
2007-05-07 01:49:36 +04:00
# ifdef CONFIG_NUMA
void * __kmalloc_node ( size_t size , gfp_t flags , int node )
{
2007-10-16 12:24:38 +04:00
struct kmem_cache * s ;
2007-05-07 01:49:36 +04:00
2008-02-15 01:28:09 +03:00
if ( unlikely ( size > PAGE_SIZE ) )
2008-03-02 00:56:40 +03:00
return kmalloc_large_node ( size , flags , node ) ;
2007-10-16 12:24:38 +04:00
s = get_slab ( size , flags ) ;
if ( unlikely ( ZERO_OR_NULL_PTR ( s ) ) )
2007-07-17 15:03:22 +04:00
return s ;
2008-08-19 21:43:25 +04:00
return slab_alloc ( s , flags , node , _RET_IP_ ) ;
2007-05-07 01:49:36 +04:00
}
EXPORT_SYMBOL ( __kmalloc_node ) ;
# endif
size_t ksize ( const void * object )
{
2007-06-09 00:46:49 +04:00
struct page * page ;
2007-05-07 01:49:36 +04:00
struct kmem_cache * s ;
2007-10-16 12:24:46 +04:00
if ( unlikely ( object = = ZERO_SIZE_PTR ) )
2007-06-09 00:46:49 +04:00
return 0 ;
2007-12-05 10:45:30 +03:00
page = virt_to_head_page ( object ) ;
2008-05-22 20:22:25 +04:00
if ( unlikely ( ! PageSlab ( page ) ) ) {
WARN_ON ( ! PageCompound ( page ) ) ;
2007-12-05 10:45:30 +03:00
return PAGE_SIZE < < compound_order ( page ) ;
2008-05-22 20:22:25 +04:00
}
2007-05-07 01:49:36 +04:00
s = page - > slab ;
2008-02-16 10:45:25 +03:00
# ifdef CONFIG_SLUB_DEBUG
2007-05-07 01:49:36 +04:00
/*
* Debugging requires use of the padding between object
* and whatever may come after it .
*/
if ( s - > flags & ( SLAB_RED_ZONE | SLAB_POISON ) )
return s - > objsize ;
2008-02-16 10:45:25 +03:00
# endif
2007-05-07 01:49:36 +04:00
/*
* If we have the need to store the freelist pointer
* back there or track user information then we can
* only use the space before that information .
*/
if ( s - > flags & ( SLAB_DESTROY_BY_RCU | SLAB_STORE_USER ) )
return s - > inuse ;
/*
* Else we can use all the padding etc for the allocation
*/
return s - > size ;
}
void kfree ( const void * x )
{
struct page * page ;
2008-02-08 04:47:41 +03:00
void * object = ( void * ) x ;
2007-05-07 01:49:36 +04:00
2007-10-16 12:24:44 +04:00
if ( unlikely ( ZERO_OR_NULL_PTR ( x ) ) )
2007-05-07 01:49:36 +04:00
return ;
2007-05-07 01:49:41 +04:00
page = virt_to_head_page ( x ) ;
2007-10-16 12:24:38 +04:00
if ( unlikely ( ! PageSlab ( page ) ) ) {
2008-05-28 21:32:22 +04:00
BUG_ON ( ! PageCompound ( page ) ) ;
2007-10-16 12:24:38 +04:00
put_page ( page ) ;
return ;
}
2008-08-19 21:43:25 +04:00
slab_free ( page - > slab , page , object , _RET_IP_ ) ;
2007-05-07 01:49:36 +04:00
}
EXPORT_SYMBOL ( kfree ) ;
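/*
 * Illustrative usage sketch (not part of SLUB): a kmalloc() request is
 * rounded up to the size of the kmalloc cache that serves it, and ksize()
 * reports the space that is actually usable.  The buffer size is hypothetical.
 */
#if 0	/* example only, not compiled */
static void kmalloc_example(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);

	if (!buf)
		return;
	/* ksize(buf) is at least 100; typically 128 here (kmalloc-128). */
	memset(buf, 0, ksize(buf));
	kfree(buf);
}
#endif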
2007-05-07 01:49:46 +04:00
/*
2007-05-09 13:32:39 +04:00
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
* the remaining slabs by the number of items in use . The slabs with the
* most items in use come first . New allocations will then fill those up
* and thus they can be removed from the partial lists .
*
* The slabs with the least items are placed last . This results in them
* being allocated from last, increasing the chance that the last objects
* are freed in them .
2007-05-07 01:49:46 +04:00
*/
int kmem_cache_shrink ( struct kmem_cache * s )
{
int node ;
int i ;
struct kmem_cache_node * n ;
struct page * page ;
struct page * t ;
2008-04-14 20:11:40 +04:00
int objects = oo_objects ( s - > max ) ;
2007-05-07 01:49:46 +04:00
struct list_head * slabs_by_inuse =
2008-04-14 20:11:31 +04:00
kmalloc ( sizeof ( struct list_head ) * objects , GFP_KERNEL ) ;
2007-05-07 01:49:46 +04:00
unsigned long flags ;
if ( ! slabs_by_inuse )
return - ENOMEM ;
flush_all ( s ) ;
2007-10-16 12:25:33 +04:00
for_each_node_state ( node , N_NORMAL_MEMORY ) {
2007-05-07 01:49:46 +04:00
n = get_node ( s , node ) ;
if ( ! n - > nr_partial )
continue ;
2008-04-14 20:11:31 +04:00
for ( i = 0 ; i < objects ; i + + )
2007-05-07 01:49:46 +04:00
INIT_LIST_HEAD ( slabs_by_inuse + i ) ;
spin_lock_irqsave ( & n - > list_lock , flags ) ;
/*
2007-05-09 13:32:39 +04:00
* Build lists indexed by the items in use in each slab .
2007-05-07 01:49:46 +04:00
*
2007-05-09 13:32:39 +04:00
* Note that concurrent frees may occur while we hold the
* list_lock . page - > inuse here is the upper limit .
2007-05-07 01:49:46 +04:00
*/
list_for_each_entry_safe ( page , t , & n - > partial , lru ) {
if ( ! page - > inuse & & slab_trylock ( page ) ) {
/*
* Must hold slab lock here because slab_free
* may have freed the last object and be
* waiting to release the slab .
*/
list_del ( & page - > lru ) ;
n - > nr_partial - - ;
slab_unlock ( page ) ;
discard_slab ( s , page ) ;
} else {
2007-07-31 00:06:46 +04:00
list_move ( & page - > lru ,
slabs_by_inuse + page - > inuse ) ;
2007-05-07 01:49:46 +04:00
}
}
/*
2007-05-09 13:32:39 +04:00
* Rebuild the partial list with the slabs filled up most
* first and the least used slabs at the end .
2007-05-07 01:49:46 +04:00
*/
2008-04-14 20:11:31 +04:00
for ( i = objects - 1 ; i > = 0 ; i - - )
2007-05-07 01:49:46 +04:00
list_splice ( slabs_by_inuse + i , n - > partial . prev ) ;
spin_unlock_irqrestore ( & n - > list_lock , flags ) ;
}
kfree ( slabs_by_inuse ) ;
return 0 ;
}
EXPORT_SYMBOL ( kmem_cache_shrink ) ;
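/*
 * Illustrative sketch (not SLUB code) of the bucketing idea used by
 * kmem_cache_shrink(): entries are dropped into one list per "in use"
 * count and spliced back from most used to least used, so the fullest
 * slabs end up at the head of the partial list.  The item type is
 * hypothetical and 0 <= inuse < max_objects is assumed.
 */
#if 0	/* example only, not compiled */
struct example_item {
	struct list_head lru;
	int inuse;
};

static void example_sort_by_inuse(struct list_head *partial, int max_objects)
{
	struct list_head *buckets;
	struct example_item *it, *tmp;
	int i;

	buckets = kmalloc(sizeof(*buckets) * max_objects, GFP_KERNEL);
	if (!buckets)
		return;
	for (i = 0; i < max_objects; i++)
		INIT_LIST_HEAD(buckets + i);

	list_for_each_entry_safe(it, tmp, partial, lru)
		list_move(&it->lru, buckets + it->inuse);

	/* Splice from most used to least used, as the loop above does. */
	for (i = max_objects - 1; i >= 0; i--)
		list_splice(buckets + i, partial->prev);

	kfree(buckets);
}
#endif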
2007-10-22 03:41:37 +04:00
# if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
static int slab_mem_going_offline_callback ( void * arg )
{
struct kmem_cache * s ;
down_read ( & slub_lock ) ;
list_for_each_entry ( s , & slab_caches , list )
kmem_cache_shrink ( s ) ;
up_read ( & slub_lock ) ;
return 0 ;
}
static void slab_mem_offline_callback ( void * arg )
{
struct kmem_cache_node * n ;
struct kmem_cache * s ;
struct memory_notify * marg = arg ;
int offline_node ;
offline_node = marg - > status_change_nid ;
/*
* If the node still has available memory, its kmem_cache_node is
* still needed, so there is nothing to do.
*/
if ( offline_node < 0 )
return ;
down_read ( & slub_lock ) ;
list_for_each_entry ( s , & slab_caches , list ) {
n = get_node ( s , offline_node ) ;
if ( n ) {
/*
* if n - > nr_slabs > 0 , slabs still exist on the node
* that is going down . We were unable to free them ,
* and the offline_pages() function shouldn't call this
* callback . So , we must fail .
*/
2008-04-14 19:53:02 +04:00
BUG_ON ( slabs_node ( s , offline_node ) ) ;
2007-10-22 03:41:37 +04:00
s - > node [ offline_node ] = NULL ;
kmem_cache_free ( kmalloc_caches , n ) ;
}
}
up_read ( & slub_lock ) ;
}
static int slab_mem_going_online_callback ( void * arg )
{
struct kmem_cache_node * n ;
struct kmem_cache * s ;
struct memory_notify * marg = arg ;
int nid = marg - > status_change_nid ;
int ret = 0 ;
/*
* If the node ' s memory is already available , then kmem_cache_node is
* already created . Nothing to do .
*/
if ( nid < 0 )
return 0 ;
/*
2008-04-30 03:11:12 +04:00
* We are bringing a node online . No memory is available yet . We must
2007-10-22 03:41:37 +04:00
* allocate a kmem_cache_node structure in order to bring the node
* online .
*/
down_read ( & slub_lock ) ;
list_for_each_entry ( s , & slab_caches , list ) {
/*
* XXX: kmem_cache_alloc_node will fall back to other nodes
* since memory is not yet available from the node that
* is brought up .
*/
n = kmem_cache_alloc ( kmalloc_caches , GFP_KERNEL ) ;
if ( ! n ) {
ret = - ENOMEM ;
goto out ;
}
2008-08-05 10:28:47 +04:00
init_kmem_cache_node ( n , s ) ;
2007-10-22 03:41:37 +04:00
s - > node [ nid ] = n ;
}
out :
up_read ( & slub_lock ) ;
return ret ;
}
static int slab_memory_callback ( struct notifier_block * self ,
unsigned long action , void * arg )
{
int ret = 0 ;
switch ( action ) {
case MEM_GOING_ONLINE :
ret = slab_mem_going_online_callback ( arg ) ;
break ;
case MEM_GOING_OFFLINE :
ret = slab_mem_going_offline_callback ( arg ) ;
break ;
case MEM_OFFLINE :
case MEM_CANCEL_ONLINE :
slab_mem_offline_callback ( arg ) ;
break ;
case MEM_ONLINE :
case MEM_CANCEL_OFFLINE :
break ;
}
2008-12-02 00:13:48 +03:00
if ( ret )
ret = notifier_from_errno ( ret ) ;
else
ret = NOTIFY_OK ;
2007-10-22 03:41:37 +04:00
return ret ;
}
# endif /* CONFIG_MEMORY_HOTPLUG */
2007-05-07 01:49:36 +04:00
/********************************************************************
 *		Basic setup of slabs
 *******************************************************************/
void __init kmem_cache_init ( void )
{
int i ;
2007-06-16 21:16:13 +04:00
int caches = 0 ;
2007-05-07 01:49:36 +04:00
2007-10-16 12:26:08 +04:00
init_alloc_cpu ( ) ;
2007-05-07 01:49:36 +04:00
# ifdef CONFIG_NUMA
/*
* Must first have the slab cache available for the allocations of the
2007-05-09 13:32:39 +04:00
* struct kmem_cache_node ' s . There is special bootstrap code in
2007-05-07 01:49:36 +04:00
* kmem_cache_open for slab_state = = DOWN .
*/
create_kmalloc_cache ( & kmalloc_caches [ 0 ] , " kmem_cache_node " ,
sizeof ( struct kmem_cache_node ) , GFP_KERNEL ) ;
2007-05-31 11:40:51 +04:00
kmalloc_caches [ 0 ] . refcount = - 1 ;
2007-06-16 21:16:13 +04:00
caches + + ;
2007-10-22 03:41:37 +04:00
2008-04-29 12:00:41 +04:00
hotplug_memory_notifier ( slab_memory_callback , SLAB_CALLBACK_PRI ) ;
2007-05-07 01:49:36 +04:00
# endif
/* Able to allocate the per node structures */
slab_state = PARTIAL ;
/* Caches that are not of the two-to-the-power-of size */
2007-06-16 21:16:13 +04:00
if ( KMALLOC_MIN_SIZE < = 64 ) {
create_kmalloc_cache ( & kmalloc_caches [ 1 ] ,
2007-05-07 01:49:36 +04:00
" kmalloc-96 " , 96 , GFP_KERNEL ) ;
2007-06-16 21:16:13 +04:00
caches + + ;
create_kmalloc_cache ( & kmalloc_caches [ 2 ] ,
2007-05-07 01:49:36 +04:00
" kmalloc-192 " , 192 , GFP_KERNEL ) ;
2007-06-16 21:16:13 +04:00
caches + + ;
}
2007-05-07 01:49:36 +04:00
2008-02-15 01:28:09 +03:00
for ( i = KMALLOC_SHIFT_LOW ; i < = PAGE_SHIFT ; i + + ) {
2007-05-07 01:49:36 +04:00
create_kmalloc_cache ( & kmalloc_caches [ i ] ,
" kmalloc " , 1 < < i , GFP_KERNEL ) ;
2007-06-16 21:16:13 +04:00
caches + + ;
}
2007-05-07 01:49:36 +04:00
2007-07-17 15:03:26 +04:00
/*
* Patch up the size_index table if we have strange large alignment
* requirements for the kmalloc array . This is only the case for
2008-02-16 10:45:26 +03:00
* MIPS it seems . The standard arches will not generate any code here .
2007-07-17 15:03:26 +04:00
*
* Largest permitted alignment is 256 bytes due to the way we
* handle the index determination for the smaller caches .
*
* Make sure that nothing crazy happens if someone starts tinkering
* around with ARCH_KMALLOC_MINALIGN
*/
BUILD_BUG_ON ( KMALLOC_MIN_SIZE > 256 | |
( KMALLOC_MIN_SIZE & ( KMALLOC_MIN_SIZE - 1 ) ) ) ;
2007-07-17 15:03:28 +04:00
for ( i = 8 ; i < KMALLOC_MIN_SIZE ; i + = 8 )
2007-07-17 15:03:26 +04:00
size_index [ ( i - 1 ) / 8 ] = KMALLOC_SHIFT_LOW ;
2008-07-03 18:14:26 +04:00
if ( KMALLOC_MIN_SIZE = = 128 ) {
/*
* The 192 byte sized cache is not used if the alignment
* is 128 byte . Redirect kmalloc to use the 256 byte cache
* instead .
*/
for ( i = 128 + 8 ; i < = 192 ; i + = 8 )
size_index [ ( i - 1 ) / 8 ] = 8 ;
}
2007-05-07 01:49:36 +04:00
slab_state = UP ;
/* Provide the correct kmalloc names now that the caches are up */
2008-02-15 01:28:09 +03:00
for ( i = KMALLOC_SHIFT_LOW ; i < = PAGE_SHIFT ; i + + )
2007-05-07 01:49:36 +04:00
kmalloc_caches [ i ] . name =
kasprintf ( GFP_KERNEL , " kmalloc-%d " , 1 < < i ) ;
# ifdef CONFIG_SMP
register_cpu_notifier ( & slab_notifier ) ;
2007-10-16 12:26:08 +04:00
kmem_size = offsetof ( struct kmem_cache , cpu_slab ) +
nr_cpu_ids * sizeof ( struct kmem_cache_cpu * ) ;
# else
kmem_size = sizeof ( struct kmem_cache ) ;
2007-05-07 01:49:36 +04:00
# endif
2008-02-06 04:57:39 +03:00
printk ( KERN_INFO
" SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d, "
2007-06-16 21:16:13 +04:00
" CPUs=%d, Nodes=%d \n " ,
caches , cache_line_size ( ) ,
2007-05-07 01:49:36 +04:00
slub_min_order , slub_max_order , slub_min_objects ,
nr_cpu_ids , nr_node_ids ) ;
}
/*
* Find a mergeable slab cache
*/
static int slab_unmergeable ( struct kmem_cache * s )
{
if ( slub_nomerge | | ( s - > flags & SLUB_NEVER_MERGE ) )
return 1 ;
2007-05-17 09:10:50 +04:00
if ( s - > ctor )
2007-05-07 01:49:36 +04:00
return 1 ;
2007-05-31 11:40:51 +04:00
/*
* We may have set a slab to be unmergeable during bootstrap .
*/
if ( s - > refcount < 0 )
return 1 ;
2007-05-07 01:49:36 +04:00
return 0 ;
}
static struct kmem_cache * find_mergeable ( size_t size ,
2007-09-12 02:24:11 +04:00
size_t align , unsigned long flags , const char * name ,
2008-07-26 06:45:34 +04:00
void ( * ctor ) ( void * ) )
2007-05-07 01:49:36 +04:00
{
2007-07-17 15:03:19 +04:00
struct kmem_cache * s ;
2007-05-07 01:49:36 +04:00
if ( slub_nomerge | | ( flags & SLUB_NEVER_MERGE ) )
return NULL ;
2007-05-17 09:10:50 +04:00
if ( ctor )
2007-05-07 01:49:36 +04:00
return NULL ;
size = ALIGN ( size , sizeof ( void * ) ) ;
align = calculate_alignment ( flags , align , size ) ;
size = ALIGN ( size , align ) ;
2007-09-12 02:24:11 +04:00
flags = kmem_cache_flags ( size , flags , name , NULL ) ;
2007-05-07 01:49:36 +04:00
2007-07-17 15:03:19 +04:00
list_for_each_entry ( s , & slab_caches , list ) {
2007-05-07 01:49:36 +04:00
if ( slab_unmergeable ( s ) )
continue ;
if ( size > s - > size )
continue ;
2007-09-12 02:24:11 +04:00
if ( ( flags & SLUB_MERGE_SAME ) ! = ( s - > flags & SLUB_MERGE_SAME ) )
2007-05-07 01:49:36 +04:00
continue ;
/*
* Check if alignment is compatible .
* Courtesy of Adrian Drzewiecki
*/
2008-01-08 10:20:27 +03:00
if ( ( s - > size & ~ ( align - 1 ) ) ! = s - > size )
2007-05-07 01:49:36 +04:00
continue ;
if ( s - > size - size > = sizeof ( void * ) )
continue ;
return s ;
}
return NULL ;
}
struct kmem_cache * kmem_cache_create ( const char * name , size_t size ,
2008-07-26 06:45:34 +04:00
size_t align , unsigned long flags , void ( * ctor ) ( void * ) )
2007-05-07 01:49:36 +04:00
{
struct kmem_cache * s ;
down_write ( & slub_lock ) ;
2007-09-12 02:24:11 +04:00
s = find_mergeable ( size , align , flags , name , ctor ) ;
2007-05-07 01:49:36 +04:00
if ( s ) {
2007-10-16 12:26:09 +04:00
int cpu ;
2007-05-07 01:49:36 +04:00
s - > refcount + + ;
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc .
*/
s - > objsize = max ( s - > objsize , ( int ) size ) ;
2007-10-16 12:26:09 +04:00
/*
* And then we need to update the object size in the
* per cpu structures
*/
for_each_online_cpu ( cpu )
get_cpu_slab ( s , cpu ) - > objsize = s - > objsize ;
2008-02-16 10:45:26 +03:00
2007-05-07 01:49:36 +04:00
s - > inuse = max_t ( int , s - > inuse , ALIGN ( size , sizeof ( void * ) ) ) ;
2007-07-17 15:03:31 +04:00
up_write ( & slub_lock ) ;
2008-02-16 10:45:26 +03:00
2008-12-18 09:09:46 +03:00
if ( sysfs_slab_alias ( s , name ) ) {
down_write ( & slub_lock ) ;
s - > refcount - - ;
up_write ( & slub_lock ) ;
2007-05-07 01:49:36 +04:00
goto err ;
2008-12-18 09:09:46 +03:00
}
2007-07-17 15:03:31 +04:00
return s ;
}
2008-02-16 10:45:26 +03:00
2007-07-17 15:03:31 +04:00
s = kmalloc ( kmem_size , GFP_KERNEL ) ;
if ( s ) {
if ( kmem_cache_open ( s , GFP_KERNEL , name ,
2007-05-17 09:10:50 +04:00
size , align , flags , ctor ) ) {
2007-05-07 01:49:36 +04:00
list_add ( & s - > list , & slab_caches ) ;
2007-07-17 15:03:31 +04:00
up_write ( & slub_lock ) ;
2008-12-18 09:09:46 +03:00
if ( sysfs_slab_add ( s ) ) {
down_write ( & slub_lock ) ;
list_del ( & s - > list ) ;
up_write ( & slub_lock ) ;
kfree ( s ) ;
2007-07-17 15:03:31 +04:00
goto err ;
2008-12-18 09:09:46 +03:00
}
2007-07-17 15:03:31 +04:00
return s ;
}
kfree ( s ) ;
2007-05-07 01:49:36 +04:00
}
up_write ( & slub_lock ) ;
err :
if ( flags & SLAB_PANIC )
panic ( " Cannot create slabcache %s \n " , name ) ;
else
s = NULL ;
return s ;
}
EXPORT_SYMBOL ( kmem_cache_create ) ;
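/*
 * Typical usage (illustrative sketch only; "foo" and struct foo are
 * made-up names, not part of this file):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 * With a NULL ctor and no debug flags such a cache is a prime candidate
 * for merging with an existing cache of suitable size via find_mergeable().
 */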
# ifdef CONFIG_SMP
/*
2007-05-09 13:32:39 +04:00
* Use the cpu notifier to ensure that the cpu slabs are flushed when
* necessary.
2007-05-07 01:49:36 +04:00
*/
static int __cpuinit slab_cpuup_callback ( struct notifier_block * nfb ,
unsigned long action , void * hcpu )
{
long cpu = ( long ) hcpu ;
2007-07-17 15:03:19 +04:00
struct kmem_cache * s ;
unsigned long flags ;
2007-05-07 01:49:36 +04:00
switch ( action ) {
2007-10-16 12:26:08 +04:00
case CPU_UP_PREPARE :
case CPU_UP_PREPARE_FROZEN :
init_alloc_cpu_cpu ( cpu ) ;
down_read ( & slub_lock ) ;
list_for_each_entry ( s , & slab_caches , list )
s - > cpu_slab [ cpu ] = alloc_kmem_cache_cpu ( s , cpu ,
GFP_KERNEL ) ;
up_read ( & slub_lock ) ;
break ;
2007-05-07 01:49:36 +04:00
case CPU_UP_CANCELED :
2007-05-09 13:35:10 +04:00
case CPU_UP_CANCELED_FROZEN :
2007-05-07 01:49:36 +04:00
case CPU_DEAD :
2007-05-09 13:35:10 +04:00
case CPU_DEAD_FROZEN :
2007-07-17 15:03:19 +04:00
down_read ( & slub_lock ) ;
list_for_each_entry ( s , & slab_caches , list ) {
2007-10-16 12:26:08 +04:00
struct kmem_cache_cpu * c = get_cpu_slab ( s , cpu ) ;
2007-07-17 15:03:19 +04:00
local_irq_save ( flags ) ;
__flush_cpu_slab ( s , cpu ) ;
local_irq_restore ( flags ) ;
2007-10-16 12:26:08 +04:00
free_kmem_cache_cpu ( c , cpu ) ;
s - > cpu_slab [ cpu ] = NULL ;
2007-07-17 15:03:19 +04:00
}
up_read ( & slub_lock ) ;
2007-05-07 01:49:36 +04:00
break ;
default :
break ;
}
return NOTIFY_OK ;
}
2008-01-08 10:20:27 +03:00
static struct notifier_block __cpuinitdata slab_notifier = {
2008-02-06 04:57:39 +03:00
. notifier_call = slab_cpuup_callback
2008-01-08 10:20:27 +03:00
} ;
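/*
 * Summary (added for clarity): CPU_UP_PREPARE allocates the per cpu
 * kmem_cache_cpu structure of every cache before the cpu can allocate,
 * while CPU_UP_CANCELED/CPU_DEAD flush the cpu slab back to the partial
 * lists and free that structure again. slub_lock is only taken for
 * reading since the list of caches itself is not modified here.
 */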
2007-05-07 01:49:36 +04:00
# endif
2008-08-19 21:43:25 +04:00
void * __kmalloc_track_caller ( size_t size , gfp_t gfpflags , unsigned long caller )
2007-05-07 01:49:36 +04:00
{
2007-10-16 12:24:38 +04:00
struct kmem_cache * s ;
2008-02-15 01:28:09 +03:00
if ( unlikely ( size > PAGE_SIZE ) )
2008-02-11 23:47:46 +03:00
return kmalloc_large ( size , gfpflags ) ;
2007-10-16 12:24:38 +04:00
s = get_slab ( size , gfpflags ) ;
2007-05-07 01:49:36 +04:00
2007-10-16 12:24:44 +04:00
if ( unlikely ( ZERO_OR_NULL_PTR ( s ) ) )
2007-07-17 15:03:22 +04:00
return s ;
2007-05-07 01:49:36 +04:00
2007-07-17 15:03:28 +04:00
return slab_alloc ( s , gfpflags , - 1 , caller ) ;
2007-05-07 01:49:36 +04:00
}
void * __kmalloc_node_track_caller ( size_t size , gfp_t gfpflags ,
2008-08-19 21:43:25 +04:00
int node , unsigned long caller )
2007-05-07 01:49:36 +04:00
{
2007-10-16 12:24:38 +04:00
struct kmem_cache * s ;
2008-02-15 01:28:09 +03:00
if ( unlikely ( size > PAGE_SIZE ) )
2008-03-02 00:56:40 +03:00
return kmalloc_large_node ( size , gfpflags , node ) ;
2008-02-11 23:47:46 +03:00
2007-10-16 12:24:38 +04:00
s = get_slab ( size , gfpflags ) ;
2007-05-07 01:49:36 +04:00
2007-10-16 12:24:44 +04:00
if ( unlikely ( ZERO_OR_NULL_PTR ( s ) ) )
2007-07-17 15:03:22 +04:00
return s ;
2007-05-07 01:49:36 +04:00
2007-07-17 15:03:28 +04:00
return slab_alloc ( s , gfpflags , node , caller ) ;
2007-05-07 01:49:36 +04:00
}
2008-04-30 03:16:06 +04:00
# ifdef CONFIG_SLUB_DEBUG
2008-04-14 20:11:40 +04:00
static unsigned long count_partial ( struct kmem_cache_node * n ,
int ( * get_count ) ( struct page * ) )
2008-04-14 19:51:34 +04:00
{
unsigned long flags ;
unsigned long x = 0 ;
struct page * page ;
spin_lock_irqsave ( & n - > list_lock , flags ) ;
list_for_each_entry ( page , & n - > partial , lru )
2008-04-14 20:11:40 +04:00
x + = get_count ( page ) ;
2008-04-14 19:51:34 +04:00
spin_unlock_irqrestore ( & n - > list_lock , flags ) ;
return x ;
}
2008-04-14 20:11:40 +04:00
static int count_inuse ( struct page * page )
{
return page - > inuse ;
}
static int count_total ( struct page * page )
{
return page - > objects ;
}
static int count_free ( struct page * page )
{
return page - > objects - page - > inuse ;
}
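/*
 * count_partial() walks the partial list of one node under list_lock and
 * sums up whatever the callback reports per slab page: count_inuse for
 * allocated objects, count_total for object capacity and count_free for
 * the remaining free slots. The free count is what /proc/slabinfo and the
 * sysfs object counts further below are built from.
 */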
2008-04-14 19:51:34 +04:00
2007-07-17 15:03:30 +04:00
static int validate_slab ( struct kmem_cache * s , struct page * page ,
unsigned long * map )
2007-05-07 01:49:43 +04:00
{
void * p ;
2008-03-02 00:40:44 +03:00
void * addr = page_address ( page ) ;
2007-05-07 01:49:43 +04:00
if ( ! check_slab ( s , page ) | |
! on_freelist ( s , page , NULL ) )
return 0 ;
/* Now we know that a valid freelist exists */
2008-04-14 20:11:30 +04:00
bitmap_zero ( map , page - > objects ) ;
2007-05-07 01:49:43 +04:00
2007-05-09 13:32:40 +04:00
for_each_free_object ( p , s , page - > freelist ) {
set_bit ( slab_index ( p , s , addr ) , map ) ;
2007-05-07 01:49:43 +04:00
if ( ! check_object ( s , page , p , 0 ) )
return 0 ;
}
2008-04-14 20:11:31 +04:00
for_each_object ( p , s , addr , page - > objects )
2007-05-09 13:32:40 +04:00
if ( ! test_bit ( slab_index ( p , s , addr ) , map ) )
2007-05-07 01:49:43 +04:00
if ( ! check_object ( s , page , p , 1 ) )
return 0 ;
return 1 ;
}
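/*
 * Validation sketch: the freelist is walked first and every free object is
 * marked in the bitmap, then check_object() is run with the matching state
 * for each object in the slab - inactive (0) for objects found on the
 * freelist, active (1) for everything else. Any redzone/poison mismatch
 * makes validate_slab() return 0.
 */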
2007-07-17 15:03:30 +04:00
static void validate_slab_slab ( struct kmem_cache * s , struct page * page ,
unsigned long * map )
2007-05-07 01:49:43 +04:00
{
if ( slab_trylock ( page ) ) {
2007-07-17 15:03:30 +04:00
validate_slab ( s , page , map ) ;
2007-05-07 01:49:43 +04:00
slab_unlock ( page ) ;
} else
printk ( KERN_INFO " SLUB %s: Skipped busy slab 0x%p \n " ,
s - > name , page ) ;
if ( s - > flags & DEBUG_DEFAULT_FLAGS ) {
2008-07-24 08:27:18 +04:00
if ( ! PageSlubDebug ( page ) )
printk ( KERN_ERR " SLUB %s: SlubDebug not set "
2007-05-07 01:49:43 +04:00
" on slab 0x%p \n " , s - > name , page ) ;
} else {
2008-07-24 08:27:18 +04:00
if ( PageSlubDebug ( page ) )
printk ( KERN_ERR " SLUB %s: SlubDebug set on "
2007-05-07 01:49:43 +04:00
" slab 0x%p \n " , s - > name , page ) ;
}
}
2007-07-17 15:03:30 +04:00
static int validate_slab_node ( struct kmem_cache * s ,
struct kmem_cache_node * n , unsigned long * map )
2007-05-07 01:49:43 +04:00
{
unsigned long count = 0 ;
struct page * page ;
unsigned long flags ;
spin_lock_irqsave ( & n - > list_lock , flags ) ;
list_for_each_entry ( page , & n - > partial , lru ) {
2007-07-17 15:03:30 +04:00
validate_slab_slab ( s , page , map ) ;
2007-05-07 01:49:43 +04:00
count + + ;
}
if ( count ! = n - > nr_partial )
printk ( KERN_ERR " SLUB %s: %ld partial slabs counted but "
" counter=%ld \n " , s - > name , count , n - > nr_partial ) ;
if ( ! ( s - > flags & SLAB_STORE_USER ) )
goto out ;
list_for_each_entry ( page , & n - > full , lru ) {
2007-07-17 15:03:30 +04:00
validate_slab_slab ( s , page , map ) ;
2007-05-07 01:49:43 +04:00
count + + ;
}
if ( count ! = atomic_long_read ( & n - > nr_slabs ) )
printk ( KERN_ERR " SLUB: %s %ld slabs counted but "
" counter=%ld \n " , s - > name , count ,
atomic_long_read ( & n - > nr_slabs ) ) ;
out :
spin_unlock_irqrestore ( & n - > list_lock , flags ) ;
return count ;
}
2007-07-17 15:03:30 +04:00
static long validate_slab_cache ( struct kmem_cache * s )
2007-05-07 01:49:43 +04:00
{
int node ;
unsigned long count = 0 ;
2008-04-14 20:11:40 +04:00
unsigned long * map = kmalloc ( BITS_TO_LONGS ( oo_objects ( s - > max ) ) *
2007-07-17 15:03:30 +04:00
sizeof ( unsigned long ) , GFP_KERNEL ) ;
if ( ! map )
return - ENOMEM ;
2007-05-07 01:49:43 +04:00
flush_all ( s ) ;
2007-10-16 12:25:33 +04:00
for_each_node_state ( node , N_NORMAL_MEMORY ) {
2007-05-07 01:49:43 +04:00
struct kmem_cache_node * n = get_node ( s , node ) ;
2007-07-17 15:03:30 +04:00
count + = validate_slab_node ( s , n , map ) ;
2007-05-07 01:49:43 +04:00
}
2007-07-17 15:03:30 +04:00
kfree ( map ) ;
2007-05-07 01:49:43 +04:00
return count ;
}
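/*
 * Validation is normally triggered from user space through the sysfs
 * attribute defined further below, e.g. (illustrative, the cache name is
 * just an example):
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * which ends up calling validate_slab_cache() for that cache.
 */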
2007-05-09 13:32:41 +04:00
# ifdef SLUB_RESILIENCY_TEST
static void resiliency_test ( void )
{
u8 * p ;
printk ( KERN_ERR " SLUB resiliency testing \n " ) ;
printk ( KERN_ERR " ----------------------- \n " ) ;
printk ( KERN_ERR " A. Corruption after allocation \n " ) ;
p = kzalloc ( 16 , GFP_KERNEL ) ;
p [ 16 ] = 0x12 ;
printk ( KERN_ERR " \n 1. kmalloc-16: Clobber Redzone/next pointer "
" 0x12->0x%p \n \n " , p + 16 ) ;
validate_slab_cache ( kmalloc_caches + 4 ) ;
/* Hmmm... The next two are dangerous */
p = kzalloc ( 32 , GFP_KERNEL ) ;
p [ 32 + sizeof ( void * ) ] = 0x34 ;
printk ( KERN_ERR " \n 2. kmalloc-32: Clobber next pointer/next slab "
2008-02-06 04:57:39 +03:00
" 0x34 -> -0x%p \n " , p ) ;
printk ( KERN_ERR
" If allocated object is overwritten then not detectable \n \n " ) ;
2007-05-09 13:32:41 +04:00
validate_slab_cache ( kmalloc_caches + 5 ) ;
p = kzalloc ( 64 , GFP_KERNEL ) ;
p + = 64 + ( get_cycles ( ) & 0xff ) * sizeof ( void * ) ;
* p = 0x56 ;
printk ( KERN_ERR " \n 3. kmalloc-64: corrupting random byte 0x56->0x%p \n " ,
p ) ;
2008-02-06 04:57:39 +03:00
printk ( KERN_ERR
" If allocated object is overwritten then not detectable \n \n " ) ;
2007-05-09 13:32:41 +04:00
validate_slab_cache ( kmalloc_caches + 6 ) ;
printk ( KERN_ERR " \n B. Corruption after free \n " ) ;
p = kzalloc ( 128 , GFP_KERNEL ) ;
kfree ( p ) ;
* p = 0x78 ;
printk ( KERN_ERR " 1. kmalloc-128: Clobber first word 0x78->0x%p \n \n " , p ) ;
validate_slab_cache ( kmalloc_caches + 7 ) ;
p = kzalloc ( 256 , GFP_KERNEL ) ;
kfree ( p ) ;
p [ 50 ] = 0x9a ;
2008-02-06 04:57:39 +03:00
printk ( KERN_ERR " \n 2. kmalloc-256: Clobber 50th byte 0x9a->0x%p \n \n " ,
p ) ;
2007-05-09 13:32:41 +04:00
validate_slab_cache ( kmalloc_caches + 8 ) ;
p = kzalloc ( 512 , GFP_KERNEL ) ;
kfree ( p ) ;
p [ 512 ] = 0xab ;
printk ( KERN_ERR " \n 3. kmalloc-512: Clobber redzone 0xab->0x%p \n \n " , p ) ;
validate_slab_cache ( kmalloc_caches + 9 ) ;
}
# else
static void resiliency_test ( void ) { }
# endif
2007-05-07 01:49:45 +04:00
/*
2007-05-09 13:32:39 +04:00
* Generate lists of code addresses where slabcache objects are allocated
2007-05-07 01:49:45 +04:00
* and freed .
*/
struct location {
unsigned long count ;
2008-08-19 21:43:25 +04:00
unsigned long addr ;
2007-05-09 13:32:45 +04:00
long long sum_time ;
long min_time ;
long max_time ;
long min_pid ;
long max_pid ;
2009-01-01 02:42:29 +03:00
DECLARE_BITMAP ( cpus , NR_CPUS ) ;
2007-05-09 13:32:45 +04:00
nodemask_t nodes ;
2007-05-07 01:49:45 +04:00
} ;
struct loc_track {
unsigned long max ;
unsigned long count ;
struct location * loc ;
} ;
static void free_loc_track ( struct loc_track * t )
{
if ( t - > max )
free_pages ( ( unsigned long ) t - > loc ,
get_order ( sizeof ( struct location ) * t - > max ) ) ;
}
2007-07-17 15:03:20 +04:00
static int alloc_loc_track ( struct loc_track * t , unsigned long max , gfp_t flags )
2007-05-07 01:49:45 +04:00
{
struct location * l ;
int order ;
order = get_order ( sizeof ( struct location ) * max ) ;
2007-07-17 15:03:20 +04:00
l = ( void * ) __get_free_pages ( flags , order ) ;
2007-05-07 01:49:45 +04:00
if ( ! l )
return 0 ;
if ( t - > count ) {
memcpy ( l , t - > loc , sizeof ( struct location ) * t - > count ) ;
free_loc_track ( t ) ;
}
t - > max = max ;
t - > loc = l ;
return 1 ;
}
static int add_location ( struct loc_track * t , struct kmem_cache * s ,
2007-05-09 13:32:45 +04:00
const struct track * track )
2007-05-07 01:49:45 +04:00
{
long start , end , pos ;
struct location * l ;
2008-08-19 21:43:25 +04:00
unsigned long caddr ;
2007-05-09 13:32:45 +04:00
unsigned long age = jiffies - track - > when ;
2007-05-07 01:49:45 +04:00
start = - 1 ;
end = t - > count ;
for ( ; ; ) {
pos = start + ( end - start + 1 ) / 2 ;
/*
* There is nothing at "end". If we end up there
* we need to insert the new element before "end".
*/
if ( pos = = end )
break ;
caddr = t - > loc [ pos ] . addr ;
2007-05-09 13:32:45 +04:00
if ( track - > addr = = caddr ) {
l = & t - > loc [ pos ] ;
l - > count + + ;
if ( track - > when ) {
l - > sum_time + = age ;
if ( age < l - > min_time )
l - > min_time = age ;
if ( age > l - > max_time )
l - > max_time = age ;
if ( track - > pid < l - > min_pid )
l - > min_pid = track - > pid ;
if ( track - > pid > l - > max_pid )
l - > max_pid = track - > pid ;
2009-01-01 02:42:29 +03:00
cpumask_set_cpu ( track - > cpu ,
to_cpumask ( l - > cpus ) ) ;
2007-05-09 13:32:45 +04:00
}
node_set ( page_to_nid ( virt_to_page ( track ) ) , l - > nodes ) ;
2007-05-07 01:49:45 +04:00
return 1 ;
}
2007-05-09 13:32:45 +04:00
if ( track - > addr < caddr )
2007-05-07 01:49:45 +04:00
end = pos ;
else
start = pos ;
}
/*
2007-05-09 13:32:39 +04:00
* Not found . Insert new tracking element .
2007-05-07 01:49:45 +04:00
*/
2007-07-17 15:03:20 +04:00
if ( t - > count > = t - > max & & ! alloc_loc_track ( t , 2 * t - > max , GFP_ATOMIC ) )
2007-05-07 01:49:45 +04:00
return 0 ;
l = t - > loc + pos ;
if ( pos < t - > count )
memmove ( l + 1 , l ,
( t - > count - pos ) * sizeof ( struct location ) ) ;
t - > count + + ;
l - > count = 1 ;
2007-05-09 13:32:45 +04:00
l - > addr = track - > addr ;
l - > sum_time = age ;
l - > min_time = age ;
l - > max_time = age ;
l - > min_pid = track - > pid ;
l - > max_pid = track - > pid ;
2009-01-01 02:42:29 +03:00
cpumask_clear ( to_cpumask ( l - > cpus ) ) ;
cpumask_set_cpu ( track - > cpu , to_cpumask ( l - > cpus ) ) ;
2007-05-09 13:32:45 +04:00
nodes_clear ( l - > nodes ) ;
node_set ( page_to_nid ( virt_to_page ( track ) ) , l - > nodes ) ;
2007-05-07 01:49:45 +04:00
return 1 ;
}
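/*
 * add_location() keeps t->loc sorted by track->addr and uses a binary
 * search to find the slot. A hit just bumps the counter and folds the age,
 * pid, cpu and node of this track into the existing statistics; a miss
 * grows the array if necessary (doubling it with GFP_ATOMIC) and
 * memmove()s the tail up to make room for the new entry.
 */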
static void process_slab ( struct loc_track * t , struct kmem_cache * s ,
struct page * page , enum track_item alloc )
{
2008-03-02 00:40:44 +03:00
void * addr = page_address ( page ) ;
2008-04-14 20:11:30 +04:00
DECLARE_BITMAP ( map , page - > objects ) ;
2007-05-07 01:49:45 +04:00
void * p ;
2008-04-14 20:11:30 +04:00
bitmap_zero ( map , page - > objects ) ;
2007-05-09 13:32:40 +04:00
for_each_free_object ( p , s , page - > freelist )
set_bit ( slab_index ( p , s , addr ) , map ) ;
2007-05-07 01:49:45 +04:00
2008-04-14 20:11:31 +04:00
for_each_object ( p , s , addr , page - > objects )
2007-05-09 13:32:45 +04:00
if ( ! test_bit ( slab_index ( p , s , addr ) , map ) )
add_location ( t , s , get_track ( s , p , alloc ) ) ;
2007-05-07 01:49:45 +04:00
}
static int list_locations ( struct kmem_cache * s , char * buf ,
enum track_item alloc )
{
2008-02-01 02:20:50 +03:00
int len = 0 ;
2007-05-07 01:49:45 +04:00
unsigned long i ;
2007-07-17 15:03:20 +04:00
struct loc_track t = { 0 , 0 , NULL } ;
2007-05-07 01:49:45 +04:00
int node ;
2007-07-17 15:03:20 +04:00
if ( ! alloc_loc_track ( & t , PAGE_SIZE / sizeof ( struct location ) ,
2007-10-16 12:26:09 +04:00
GFP_TEMPORARY ) )
2007-07-17 15:03:20 +04:00
return sprintf ( buf , " Out of memory \n " ) ;
2007-05-07 01:49:45 +04:00
/* Push back cpu slabs */
flush_all ( s ) ;
2007-10-16 12:25:33 +04:00
for_each_node_state ( node , N_NORMAL_MEMORY ) {
2007-05-07 01:49:45 +04:00
struct kmem_cache_node * n = get_node ( s , node ) ;
unsigned long flags ;
struct page * page ;
2007-08-23 01:01:56 +04:00
if ( ! atomic_long_read ( & n - > nr_slabs ) )
2007-05-07 01:49:45 +04:00
continue ;
spin_lock_irqsave ( & n - > list_lock , flags ) ;
list_for_each_entry ( page , & n - > partial , lru )
process_slab ( & t , s , page , alloc ) ;
list_for_each_entry ( page , & n - > full , lru )
process_slab ( & t , s , page , alloc ) ;
spin_unlock_irqrestore ( & n - > list_lock , flags ) ;
}
for ( i = 0 ; i < t . count ; i + + ) {
2007-05-09 13:32:45 +04:00
struct location * l = & t . loc [ i ] ;
2007-05-07 01:49:45 +04:00
2008-12-10 00:14:27 +03:00
if ( len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100 )
2007-05-07 01:49:45 +04:00
break ;
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf + len , " %7ld " , l - > count ) ;
2007-05-09 13:32:45 +04:00
if ( l - > addr )
2008-02-01 02:20:50 +03:00
len + = sprint_symbol ( buf + len , ( unsigned long ) l - > addr ) ;
2007-05-07 01:49:45 +04:00
else
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf + len , " <not-available> " ) ;
2007-05-09 13:32:45 +04:00
if ( l - > sum_time ! = l - > min_time ) {
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf + len , " age=%ld/%ld/%ld " ,
2008-05-01 15:34:31 +04:00
l - > min_time ,
( long ) div_u64 ( l - > sum_time , l - > count ) ,
l - > max_time ) ;
2007-05-09 13:32:45 +04:00
} else
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf + len , " age=%ld " ,
2007-05-09 13:32:45 +04:00
l - > min_time ) ;
if ( l - > min_pid ! = l - > max_pid )
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf + len , " pid=%ld-%ld " ,
2007-05-09 13:32:45 +04:00
l - > min_pid , l - > max_pid ) ;
else
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf + len , " pid=%ld " ,
2007-05-09 13:32:45 +04:00
l - > min_pid ) ;
2009-01-01 02:42:29 +03:00
if ( num_online_cpus ( ) > 1 & &
! cpumask_empty ( to_cpumask ( l - > cpus ) ) & &
2008-02-01 02:20:50 +03:00
len < PAGE_SIZE - 60 ) {
len + = sprintf ( buf + len , " cpus= " ) ;
len + = cpulist_scnprintf ( buf + len , PAGE_SIZE - len - 50 ,
2009-01-01 02:42:29 +03:00
to_cpumask ( l - > cpus ) ) ;
2007-05-09 13:32:45 +04:00
}
2007-06-24 04:16:32 +04:00
if ( num_online_nodes ( ) > 1 & & ! nodes_empty ( l - > nodes ) & &
2008-02-01 02:20:50 +03:00
len < PAGE_SIZE - 60 ) {
len + = sprintf ( buf + len , " nodes= " ) ;
len + = nodelist_scnprintf ( buf + len , PAGE_SIZE - len - 50 ,
2007-05-09 13:32:45 +04:00
l - > nodes ) ;
}
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf + len , " \n " ) ;
2007-05-07 01:49:45 +04:00
}
free_loc_track ( & t ) ;
if ( ! t . count )
2008-02-01 02:20:50 +03:00
len + = sprintf ( buf , " No data \n " ) ;
return len ;
2007-05-07 01:49:45 +04:00
}
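/*
 * The buffer produced here backs the alloc_calls/free_calls sysfs files,
 * which are only populated when SLAB_STORE_USER is set (e.g. booting with
 * slub_debug=U). A line looks roughly like this illustrative example
 * (symbol and numbers are made up):
 *
 *	 4297 kmem_cache_alloc+0x30/0x90 age=5/1203/4310 pid=1-812 cpus=0-3 nodes=0
 */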
2007-05-07 01:49:36 +04:00
enum slab_stat_type {
2008-04-14 20:11:40 +04:00
SL_ALL , /* All slabs */
SL_PARTIAL , /* Only partially allocated slabs */
SL_CPU , /* Only slabs used for cpu caches */
SL_OBJECTS , /* Determine allocated objects not slabs */
SL_TOTAL /* Determine object capacity not slabs */
2007-05-07 01:49:36 +04:00
} ;
2008-04-14 20:11:40 +04:00
# define SO_ALL (1 << SL_ALL)
2007-05-07 01:49:36 +04:00
# define SO_PARTIAL (1 << SL_PARTIAL)
# define SO_CPU (1 << SL_CPU)
# define SO_OBJECTS (1 << SL_OBJECTS)
2008-04-14 20:11:40 +04:00
# define SO_TOTAL (1 << SL_TOTAL)
2007-05-07 01:49:36 +04:00
2008-03-02 23:28:24 +03:00
static ssize_t show_slab_objects ( struct kmem_cache * s ,
char * buf , unsigned long flags )
2007-05-07 01:49:36 +04:00
{
unsigned long total = 0 ;
int node ;
int x ;
unsigned long * nodes ;
unsigned long * per_cpu ;
nodes = kzalloc ( 2 * sizeof ( unsigned long ) * nr_node_ids , GFP_KERNEL ) ;
2008-03-02 23:28:24 +03:00
if ( ! nodes )
return - ENOMEM ;
2007-05-07 01:49:36 +04:00
per_cpu = nodes + nr_node_ids ;
2008-04-14 20:11:40 +04:00
if ( flags & SO_CPU ) {
int cpu ;
2007-05-07 01:49:36 +04:00
2008-04-14 20:11:40 +04:00
for_each_possible_cpu ( cpu ) {
struct kmem_cache_cpu * c = get_cpu_slab ( s , cpu ) ;
2007-10-16 12:26:05 +04:00
2008-04-14 20:11:40 +04:00
if ( ! c | | c - > node < 0 )
continue ;
if ( c - > page ) {
if ( flags & SO_TOTAL )
x = c - > page - > objects ;
else if ( flags & SO_OBJECTS )
x = c - > page - > inuse ;
2007-05-07 01:49:36 +04:00
else
x = 1 ;
2008-04-14 20:11:40 +04:00
2007-05-07 01:49:36 +04:00
total + = x ;
2008-04-14 20:11:40 +04:00
nodes [ c - > node ] + = x ;
2007-05-07 01:49:36 +04:00
}
2008-04-14 20:11:40 +04:00
per_cpu [ c - > node ] + + ;
2007-05-07 01:49:36 +04:00
}
}
2008-04-14 20:11:40 +04:00
if ( flags & SO_ALL ) {
for_each_node_state ( node , N_NORMAL_MEMORY ) {
struct kmem_cache_node * n = get_node ( s , node ) ;
if ( flags & SO_TOTAL )
x = atomic_long_read ( & n - > total_objects ) ;
else if ( flags & SO_OBJECTS )
x = atomic_long_read ( & n - > total_objects ) -
count_partial ( n , count_free ) ;
2007-05-07 01:49:36 +04:00
else
2008-04-14 20:11:40 +04:00
x = atomic_long_read ( & n - > nr_slabs ) ;
2007-05-07 01:49:36 +04:00
total + = x ;
nodes [ node ] + = x ;
}
2008-04-14 20:11:40 +04:00
} else if ( flags & SO_PARTIAL ) {
for_each_node_state ( node , N_NORMAL_MEMORY ) {
struct kmem_cache_node * n = get_node ( s , node ) ;
2007-05-07 01:49:36 +04:00
2008-04-14 20:11:40 +04:00
if ( flags & SO_TOTAL )
x = count_partial ( n , count_total ) ;
else if ( flags & SO_OBJECTS )
x = count_partial ( n , count_inuse ) ;
2007-05-07 01:49:36 +04:00
else
2008-04-14 20:11:40 +04:00
x = n - > nr_partial ;
2007-05-07 01:49:36 +04:00
total + = x ;
nodes [ node ] + = x ;
}
}
x = sprintf ( buf , " %lu " , total ) ;
# ifdef CONFIG_NUMA
2007-10-16 12:25:33 +04:00
for_each_node_state ( node , N_NORMAL_MEMORY )
2007-05-07 01:49:36 +04:00
if ( nodes [ node ] )
x + = sprintf ( buf + x , " N%d=%lu " ,
node , nodes [ node ] ) ;
# endif
kfree ( nodes ) ;
return x + sprintf ( buf + x , " \n " ) ;
}
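/*
 * Mapping of the sysfs files below onto show_slab_objects() flags:
 *
 *	slabs		SO_ALL			all slabs of the cache
 *	objects		SO_ALL | SO_OBJECTS	allocated objects
 *	total_objects	SO_ALL | SO_TOTAL	object capacity
 *	partial		SO_PARTIAL		partially filled slabs
 *	objects_partial	SO_PARTIAL | SO_OBJECTS	objects in partial slabs
 *	cpu_slabs	SO_CPU			per cpu slabs
 *
 * Each total is followed by per node "N<id>=<count>" pairs on NUMA.
 */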
static int any_slab_objects ( struct kmem_cache * s )
{
int node ;
2007-10-16 12:26:05 +04:00
for_each_online_node ( node ) {
2007-05-07 01:49:36 +04:00
struct kmem_cache_node * n = get_node ( s , node ) ;
2007-10-16 12:26:05 +04:00
if ( ! n )
continue ;
2008-05-07 07:42:39 +04:00
if ( atomic_long_read ( & n - > total_objects ) )
2007-05-07 01:49:36 +04:00
return 1 ;
}
return 0 ;
}
# define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
# define to_slab(n) container_of(n, struct kmem_cache, kobj)
struct slab_attribute {
struct attribute attr ;
ssize_t ( * show ) ( struct kmem_cache * s , char * buf ) ;
ssize_t ( * store ) ( struct kmem_cache * s , const char * x , size_t count ) ;
} ;
# define SLAB_ATTR_RO(_name) \
static struct slab_attribute _name # # _attr = __ATTR_RO ( _name )
# define SLAB_ATTR(_name) \
static struct slab_attribute _name # # _attr = \
__ATTR ( _name , 0644 , _name # # _show , _name # # _store )
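/*
 * For reference, SLAB_ATTR_RO(slab_size) below expands to
 *
 *	static struct slab_attribute slab_size_attr = __ATTR_RO(slab_size);
 *
 * i.e. a read-only attribute named "slab_size" wired to slab_size_show(),
 * while SLAB_ATTR(order) additionally hooks up order_store() and makes the
 * file writable (mode 0644).
 */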
static ssize_t slab_size_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , s - > size ) ;
}
SLAB_ATTR_RO ( slab_size ) ;
static ssize_t align_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , s - > align ) ;
}
SLAB_ATTR_RO ( align ) ;
static ssize_t object_size_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , s - > objsize ) ;
}
SLAB_ATTR_RO ( object_size ) ;
static ssize_t objs_per_slab_show ( struct kmem_cache * s , char * buf )
{
2008-04-14 20:11:31 +04:00
return sprintf ( buf , " %d \n " , oo_objects ( s - > oo ) ) ;
2007-05-07 01:49:36 +04:00
}
SLAB_ATTR_RO ( objs_per_slab ) ;
2008-04-14 20:11:41 +04:00
static ssize_t order_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
2008-04-30 03:11:12 +04:00
unsigned long order ;
int err ;
err = strict_strtoul ( buf , 10 , & order ) ;
if ( err )
return err ;
2008-04-14 20:11:41 +04:00
if ( order > slub_max_order | | order < slub_min_order )
return - EINVAL ;
calculate_sizes ( s , order ) ;
return length ;
}
2007-05-07 01:49:36 +04:00
static ssize_t order_show ( struct kmem_cache * s , char * buf )
{
2008-04-14 20:11:31 +04:00
return sprintf ( buf , " %d \n " , oo_order ( s - > oo ) ) ;
2007-05-07 01:49:36 +04:00
}
2008-04-14 20:11:41 +04:00
SLAB_ATTR ( order ) ;
2007-05-07 01:49:36 +04:00
static ssize_t ctor_show ( struct kmem_cache * s , char * buf )
{
if ( s - > ctor ) {
int n = sprint_symbol ( buf , ( unsigned long ) s - > ctor ) ;
return n + sprintf ( buf + n , " \n " ) ;
}
return 0 ;
}
SLAB_ATTR_RO ( ctor ) ;
static ssize_t aliases_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , s - > refcount - 1 ) ;
}
SLAB_ATTR_RO ( aliases ) ;
static ssize_t slabs_show ( struct kmem_cache * s , char * buf )
{
2008-04-14 20:11:40 +04:00
return show_slab_objects ( s , buf , SO_ALL ) ;
2007-05-07 01:49:36 +04:00
}
SLAB_ATTR_RO ( slabs ) ;
static ssize_t partial_show ( struct kmem_cache * s , char * buf )
{
2008-02-16 02:22:21 +03:00
return show_slab_objects ( s , buf , SO_PARTIAL ) ;
2007-05-07 01:49:36 +04:00
}
SLAB_ATTR_RO ( partial ) ;
static ssize_t cpu_slabs_show ( struct kmem_cache * s , char * buf )
{
2008-02-16 02:22:21 +03:00
return show_slab_objects ( s , buf , SO_CPU ) ;
2007-05-07 01:49:36 +04:00
}
SLAB_ATTR_RO ( cpu_slabs ) ;
static ssize_t objects_show ( struct kmem_cache * s , char * buf )
{
2008-04-14 20:11:40 +04:00
return show_slab_objects ( s , buf , SO_ALL | SO_OBJECTS ) ;
2007-05-07 01:49:36 +04:00
}
SLAB_ATTR_RO ( objects ) ;
2008-04-14 20:11:40 +04:00
static ssize_t objects_partial_show ( struct kmem_cache * s , char * buf )
{
return show_slab_objects ( s , buf , SO_PARTIAL | SO_OBJECTS ) ;
}
SLAB_ATTR_RO ( objects_partial ) ;
static ssize_t total_objects_show ( struct kmem_cache * s , char * buf )
{
return show_slab_objects ( s , buf , SO_ALL | SO_TOTAL ) ;
}
SLAB_ATTR_RO ( total_objects ) ;
2007-05-07 01:49:36 +04:00
static ssize_t sanity_checks_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_DEBUG_FREE ) ) ;
}
static ssize_t sanity_checks_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
s - > flags & = ~ SLAB_DEBUG_FREE ;
if ( buf [ 0 ] = = ' 1 ' )
s - > flags | = SLAB_DEBUG_FREE ;
return length ;
}
SLAB_ATTR ( sanity_checks ) ;
static ssize_t trace_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_TRACE ) ) ;
}
static ssize_t trace_store ( struct kmem_cache * s , const char * buf ,
size_t length )
{
s - > flags & = ~ SLAB_TRACE ;
if ( buf [ 0 ] = = ' 1 ' )
s - > flags | = SLAB_TRACE ;
return length ;
}
SLAB_ATTR ( trace ) ;
static ssize_t reclaim_account_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_RECLAIM_ACCOUNT ) ) ;
}
static ssize_t reclaim_account_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
s - > flags & = ~ SLAB_RECLAIM_ACCOUNT ;
if ( buf [ 0 ] = = ' 1 ' )
s - > flags | = SLAB_RECLAIM_ACCOUNT ;
return length ;
}
SLAB_ATTR ( reclaim_account ) ;
static ssize_t hwcache_align_show ( struct kmem_cache * s , char * buf )
{
2007-05-07 01:49:56 +04:00
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_HWCACHE_ALIGN ) ) ;
2007-05-07 01:49:36 +04:00
}
SLAB_ATTR_RO ( hwcache_align ) ;
# ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_CACHE_DMA ) ) ;
}
SLAB_ATTR_RO ( cache_dma ) ;
# endif
static ssize_t destroy_by_rcu_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_DESTROY_BY_RCU ) ) ;
}
SLAB_ATTR_RO ( destroy_by_rcu ) ;
static ssize_t red_zone_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_RED_ZONE ) ) ;
}
static ssize_t red_zone_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
if ( any_slab_objects ( s ) )
return - EBUSY ;
s - > flags & = ~ SLAB_RED_ZONE ;
if ( buf [ 0 ] = = ' 1 ' )
s - > flags | = SLAB_RED_ZONE ;
2008-04-14 20:11:41 +04:00
calculate_sizes ( s , - 1 ) ;
2007-05-07 01:49:36 +04:00
return length ;
}
SLAB_ATTR ( red_zone ) ;
static ssize_t poison_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_POISON ) ) ;
}
static ssize_t poison_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
if ( any_slab_objects ( s ) )
return - EBUSY ;
s - > flags & = ~ SLAB_POISON ;
if ( buf [ 0 ] = = ' 1 ' )
s - > flags | = SLAB_POISON ;
2008-04-14 20:11:41 +04:00
calculate_sizes ( s , - 1 ) ;
2007-05-07 01:49:36 +04:00
return length ;
}
SLAB_ATTR ( poison ) ;
static ssize_t store_user_show ( struct kmem_cache * s , char * buf )
{
return sprintf ( buf , " %d \n " , ! ! ( s - > flags & SLAB_STORE_USER ) ) ;
}
static ssize_t store_user_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
if ( any_slab_objects ( s ) )
return - EBUSY ;
s - > flags & = ~ SLAB_STORE_USER ;
if ( buf [ 0 ] = = ' 1 ' )
s - > flags | = SLAB_STORE_USER ;
2008-04-14 20:11:41 +04:00
calculate_sizes ( s , - 1 ) ;
2007-05-07 01:49:36 +04:00
return length ;
}
SLAB_ATTR ( store_user ) ;
2007-05-07 01:49:43 +04:00
static ssize_t validate_show ( struct kmem_cache * s , char * buf )
{
return 0 ;
}
static ssize_t validate_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
2007-07-17 15:03:30 +04:00
int ret = - EINVAL ;
if ( buf [ 0 ] = = ' 1 ' ) {
ret = validate_slab_cache ( s ) ;
if ( ret > = 0 )
ret = length ;
}
return ret ;
2007-05-07 01:49:43 +04:00
}
SLAB_ATTR ( validate ) ;
2007-05-07 01:49:46 +04:00
static ssize_t shrink_show ( struct kmem_cache * s , char * buf )
{
return 0 ;
}
static ssize_t shrink_store ( struct kmem_cache * s ,
const char * buf , size_t length )
{
if ( buf [ 0 ] = = ' 1 ' ) {
int rc = kmem_cache_shrink ( s ) ;
if ( rc )
return rc ;
} else
return - EINVAL ;
return length ;
}
SLAB_ATTR ( shrink ) ;
2007-05-07 01:49:45 +04:00
static ssize_t alloc_calls_show ( struct kmem_cache * s , char * buf )
{
if ( ! ( s - > flags & SLAB_STORE_USER ) )
return - ENOSYS ;
return list_locations ( s , buf , TRACK_ALLOC ) ;
}
SLAB_ATTR_RO ( alloc_calls ) ;
static ssize_t free_calls_show ( struct kmem_cache * s , char * buf )
{
if ( ! ( s - > flags & SLAB_STORE_USER ) )
return - ENOSYS ;
return list_locations ( s , buf , TRACK_FREE ) ;
}
SLAB_ATTR_RO ( free_calls ) ;
2007-05-07 01:49:36 +04:00
# ifdef CONFIG_NUMA
2008-01-08 10:20:26 +03:00
static ssize_t remote_node_defrag_ratio_show ( struct kmem_cache * s , char * buf )
2007-05-07 01:49:36 +04:00
{
2008-01-08 10:20:26 +03:00
return sprintf ( buf , " %d \n " , s - > remote_node_defrag_ratio / 10 ) ;
2007-05-07 01:49:36 +04:00
}
2008-01-08 10:20:26 +03:00
static ssize_t remote_node_defrag_ratio_store ( struct kmem_cache * s ,
2007-05-07 01:49:36 +04:00
const char * buf , size_t length )
{
2008-04-30 03:11:12 +04:00
unsigned long ratio ;
int err ;
err = strict_strtoul ( buf , 10 , & ratio ) ;
if ( err )
return err ;
2008-08-19 17:51:22 +04:00
if ( ratio < = 100 )
2008-04-30 03:11:12 +04:00
s - > remote_node_defrag_ratio = ratio * 10 ;
2007-05-07 01:49:36 +04:00
return length ;
}
2008-01-08 10:20:26 +03:00
SLAB_ATTR ( remote_node_defrag_ratio ) ;
2007-05-07 01:49:36 +04:00
# endif
2008-02-08 04:47:41 +03:00
# ifdef CONFIG_SLUB_STATS
static int show_stat ( struct kmem_cache * s , char * buf , enum stat_item si )
{
unsigned long sum = 0 ;
int cpu ;
int len ;
int * data = kmalloc ( nr_cpu_ids * sizeof ( int ) , GFP_KERNEL ) ;
if ( ! data )
return - ENOMEM ;
for_each_online_cpu ( cpu ) {
unsigned x = get_cpu_slab ( s , cpu ) - > stat [ si ] ;
data [ cpu ] = x ;
sum + = x ;
}
len = sprintf ( buf , " %lu " , sum ) ;
2008-04-14 19:52:05 +04:00
# ifdef CONFIG_SMP
2008-02-08 04:47:41 +03:00
for_each_online_cpu ( cpu ) {
if ( data [ cpu ] & & len < PAGE_SIZE - 20 )
2008-04-14 19:52:05 +04:00
len + = sprintf ( buf + len , " C%d=%u " , cpu , data [ cpu ] ) ;
2008-02-08 04:47:41 +03:00
}
2008-04-14 19:52:05 +04:00
# endif
2008-02-08 04:47:41 +03:00
kfree ( data ) ;
return len + sprintf ( buf + len , " \n " ) ;
}
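/*
 * The statistics files print the sum over all online cpus followed by a
 * per cpu breakdown on SMP, e.g. an illustrative alloc_fastpath reading
 * (numbers made up):
 *
 *	152433 C0=80211 C1=72222
 *
 * Cpus whose counter is still zero are omitted from the breakdown.
 */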
# define STAT_ATTR(si, text) \
static ssize_t text # # _show ( struct kmem_cache * s , char * buf ) \
{ \
return show_stat ( s , buf , si ) ; \
} \
SLAB_ATTR_RO ( text ) ;
STAT_ATTR ( ALLOC_FASTPATH , alloc_fastpath ) ;
STAT_ATTR ( ALLOC_SLOWPATH , alloc_slowpath ) ;
STAT_ATTR ( FREE_FASTPATH , free_fastpath ) ;
STAT_ATTR ( FREE_SLOWPATH , free_slowpath ) ;
STAT_ATTR ( FREE_FROZEN , free_frozen ) ;
STAT_ATTR ( FREE_ADD_PARTIAL , free_add_partial ) ;
STAT_ATTR ( FREE_REMOVE_PARTIAL , free_remove_partial ) ;
STAT_ATTR ( ALLOC_FROM_PARTIAL , alloc_from_partial ) ;
STAT_ATTR ( ALLOC_SLAB , alloc_slab ) ;
STAT_ATTR ( ALLOC_REFILL , alloc_refill ) ;
STAT_ATTR ( FREE_SLAB , free_slab ) ;
STAT_ATTR ( CPUSLAB_FLUSH , cpuslab_flush ) ;
STAT_ATTR ( DEACTIVATE_FULL , deactivate_full ) ;
STAT_ATTR ( DEACTIVATE_EMPTY , deactivate_empty ) ;
STAT_ATTR ( DEACTIVATE_TO_HEAD , deactivate_to_head ) ;
STAT_ATTR ( DEACTIVATE_TO_TAIL , deactivate_to_tail ) ;
STAT_ATTR ( DEACTIVATE_REMOTE_FREES , deactivate_remote_frees ) ;
2008-04-14 20:11:40 +04:00
STAT_ATTR ( ORDER_FALLBACK , order_fallback ) ;
2008-02-08 04:47:41 +03:00
# endif
2008-01-08 10:20:27 +03:00
static struct attribute * slab_attrs [ ] = {
2007-05-07 01:49:36 +04:00
& slab_size_attr . attr ,
& object_size_attr . attr ,
& objs_per_slab_attr . attr ,
& order_attr . attr ,
& objects_attr . attr ,
2008-04-14 20:11:40 +04:00
& objects_partial_attr . attr ,
& total_objects_attr . attr ,
2007-05-07 01:49:36 +04:00
& slabs_attr . attr ,
& partial_attr . attr ,
& cpu_slabs_attr . attr ,
& ctor_attr . attr ,
& aliases_attr . attr ,
& align_attr . attr ,
& sanity_checks_attr . attr ,
& trace_attr . attr ,
& hwcache_align_attr . attr ,
& reclaim_account_attr . attr ,
& destroy_by_rcu_attr . attr ,
& red_zone_attr . attr ,
& poison_attr . attr ,
& store_user_attr . attr ,
2007-05-07 01:49:43 +04:00
& validate_attr . attr ,
2007-05-07 01:49:46 +04:00
& shrink_attr . attr ,
2007-05-07 01:49:45 +04:00
& alloc_calls_attr . attr ,
& free_calls_attr . attr ,
2007-05-07 01:49:36 +04:00
# ifdef CONFIG_ZONE_DMA
& cache_dma_attr . attr ,
# endif
# ifdef CONFIG_NUMA
2008-01-08 10:20:26 +03:00
& remote_node_defrag_ratio_attr . attr ,
2008-02-08 04:47:41 +03:00
# endif
# ifdef CONFIG_SLUB_STATS
& alloc_fastpath_attr . attr ,
& alloc_slowpath_attr . attr ,
& free_fastpath_attr . attr ,
& free_slowpath_attr . attr ,
& free_frozen_attr . attr ,
& free_add_partial_attr . attr ,
& free_remove_partial_attr . attr ,
& alloc_from_partial_attr . attr ,
& alloc_slab_attr . attr ,
& alloc_refill_attr . attr ,
& free_slab_attr . attr ,
& cpuslab_flush_attr . attr ,
& deactivate_full_attr . attr ,
& deactivate_empty_attr . attr ,
& deactivate_to_head_attr . attr ,
& deactivate_to_tail_attr . attr ,
& deactivate_remote_frees_attr . attr ,
2008-04-14 20:11:40 +04:00
& order_fallback_attr . attr ,
2007-05-07 01:49:36 +04:00
# endif
NULL
} ;
static struct attribute_group slab_attr_group = {
. attrs = slab_attrs ,
} ;
static ssize_t slab_attr_show ( struct kobject * kobj ,
struct attribute * attr ,
char * buf )
{
struct slab_attribute * attribute ;
struct kmem_cache * s ;
int err ;
attribute = to_slab_attr ( attr ) ;
s = to_slab ( kobj ) ;
if ( ! attribute - > show )
return - EIO ;
err = attribute - > show ( s , buf ) ;
return err ;
}
static ssize_t slab_attr_store ( struct kobject * kobj ,
struct attribute * attr ,
const char * buf , size_t len )
{
struct slab_attribute * attribute ;
struct kmem_cache * s ;
int err ;
attribute = to_slab_attr ( attr ) ;
s = to_slab ( kobj ) ;
if ( ! attribute - > store )
return - EIO ;
err = attribute - > store ( s , buf , len ) ;
return err ;
}
2008-01-08 09:29:05 +03:00
static void kmem_cache_release ( struct kobject * kobj )
{
struct kmem_cache * s = to_slab ( kobj ) ;
kfree ( s ) ;
}
2007-05-07 01:49:36 +04:00
static struct sysfs_ops slab_sysfs_ops = {
. show = slab_attr_show ,
. store = slab_attr_store ,
} ;
static struct kobj_type slab_ktype = {
. sysfs_ops = & slab_sysfs_ops ,
2008-01-08 09:29:05 +03:00
. release = kmem_cache_release
2007-05-07 01:49:36 +04:00
} ;
static int uevent_filter ( struct kset * kset , struct kobject * kobj )
{
struct kobj_type * ktype = get_ktype ( kobj ) ;
if ( ktype = = & slab_ktype )
return 1 ;
return 0 ;
}
static struct kset_uevent_ops slab_uevent_ops = {
. filter = uevent_filter ,
} ;
2007-11-01 18:29:06 +03:00
static struct kset * slab_kset ;
2007-05-07 01:49:36 +04:00
# define ID_STR_LENGTH 64
/* Create a unique string id for a slab cache:
2008-02-16 10:45:26 +03:00
*
* Format : [ flags - ] size
2007-05-07 01:49:36 +04:00
*/
static char * create_unique_id ( struct kmem_cache * s )
{
char * name = kmalloc ( ID_STR_LENGTH , GFP_KERNEL ) ;
char * p = name ;
BUG_ON ( ! name ) ;
* p + + = ' : ' ;
/*
* First flags affecting slabcache operations . We will only
* get here for aliasable slabs so we do not need to support
* too many flags . The flags here must cover all flags that
* are matched during merging to guarantee that the id is
* unique .
*/
if ( s - > flags & SLAB_CACHE_DMA )
* p + + = ' d ' ;
if ( s - > flags & SLAB_RECLAIM_ACCOUNT )
* p + + = ' a ' ;
if ( s - > flags & SLAB_DEBUG_FREE )
* p + + = ' F ' ;
if ( p ! = name + 1 )
* p + + = ' - ' ;
p + = sprintf ( p , " %07d " , s - > size ) ;
BUG_ON ( p > name + ID_STR_LENGTH - 1 ) ;
return name ;
}
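/*
 * Example ids (illustrative): a plain 192 byte cache yields ":0000192",
 * a DMA cache of the same size ":d-0000192" and a reclaimable cache with
 * free pointer checking ":aF-0000192". Mergeable caches then show up in
 * /sys/kernel/slab as symlinks pointing at these unique names.
 */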
static int sysfs_slab_add ( struct kmem_cache * s )
{
int err ;
const char * name ;
int unmergeable ;
if ( slab_state < SYSFS )
/* Defer until later */
return 0 ;
unmergeable = slab_unmergeable ( s ) ;
if ( unmergeable ) {
/*
* Slabcache can never be merged so we can use the name proper .
* This is typically the case for debug situations . In that
* case we can catch duplicate names easily .
*/
2007-11-01 18:29:06 +03:00
sysfs_remove_link ( & slab_kset - > kobj , s - > name ) ;
2007-05-07 01:49:36 +04:00
name = s - > name ;
} else {
/*
* Create a unique name for the slab as a target
* for the symlinks .
*/
name = create_unique_id ( s ) ;
}
2007-11-01 18:29:06 +03:00
s - > kobj . kset = slab_kset ;
2007-12-18 09:05:35 +03:00
err = kobject_init_and_add ( & s - > kobj , & slab_ktype , NULL , name ) ;
if ( err ) {
kobject_put ( & s - > kobj ) ;
2007-05-07 01:49:36 +04:00
return err ;
2007-12-18 09:05:35 +03:00
}
2007-05-07 01:49:36 +04:00
err = sysfs_create_group ( & s - > kobj , & slab_attr_group ) ;
if ( err )
return err ;
kobject_uevent ( & s - > kobj , KOBJ_ADD ) ;
if ( ! unmergeable ) {
/* Setup first alias */
sysfs_slab_alias ( s , s - > name ) ;
kfree ( name ) ;
}
return 0 ;
}
static void sysfs_slab_remove ( struct kmem_cache * s )
{
kobject_uevent ( & s - > kobj , KOBJ_REMOVE ) ;
kobject_del ( & s - > kobj ) ;
2008-01-08 09:29:05 +03:00
kobject_put ( & s - > kobj ) ;
2007-05-07 01:49:36 +04:00
}
/*
* Need to buffer aliases during bootup until sysfs becomes
2008-12-05 06:08:08 +03:00
* available lest we lose that information .
2007-05-07 01:49:36 +04:00
*/
struct saved_alias {
struct kmem_cache * s ;
const char * name ;
struct saved_alias * next ;
} ;
2007-07-17 15:03:27 +04:00
static struct saved_alias * alias_list ;
2007-05-07 01:49:36 +04:00
static int sysfs_slab_alias ( struct kmem_cache * s , const char * name )
{
struct saved_alias * al ;
if ( slab_state = = SYSFS ) {
/*
* If we have a leftover link then remove it .
*/
2007-11-01 18:29:06 +03:00
sysfs_remove_link ( & slab_kset - > kobj , name ) ;
return sysfs_create_link ( & slab_kset - > kobj , & s - > kobj , name ) ;
2007-05-07 01:49:36 +04:00
}
al = kmalloc ( sizeof ( struct saved_alias ) , GFP_KERNEL ) ;
if ( ! al )
return - ENOMEM ;
al - > s = s ;
al - > name = name ;
al - > next = alias_list ;
alias_list = al ;
return 0 ;
}
static int __init slab_sysfs_init ( void )
{
2007-07-17 15:03:19 +04:00
struct kmem_cache * s ;
2007-05-07 01:49:36 +04:00
int err ;
2007-11-06 21:36:58 +03:00
slab_kset = kset_create_and_add ( " slab " , & slab_uevent_ops , kernel_kobj ) ;
2007-11-01 18:29:06 +03:00
if ( ! slab_kset ) {
2007-05-07 01:49:36 +04:00
printk ( KERN_ERR " Cannot register slab subsystem. \n " ) ;
return - ENOSYS ;
}
2007-05-09 13:32:39 +04:00
slab_state = SYSFS ;
2007-07-17 15:03:19 +04:00
list_for_each_entry ( s , & slab_caches , list ) {
2007-05-09 13:32:39 +04:00
err = sysfs_slab_add ( s ) ;
2007-08-31 10:56:26 +04:00
if ( err )
printk ( KERN_ERR " SLUB: Unable to add boot slab %s "
" to sysfs \n " , s - > name ) ;
2007-05-09 13:32:39 +04:00
}
2007-05-07 01:49:36 +04:00
while ( alias_list ) {
struct saved_alias * al = alias_list ;
alias_list = alias_list - > next ;
err = sysfs_slab_alias ( al - > s , al - > name ) ;
2007-08-31 10:56:26 +04:00
if ( err )
printk ( KERN_ERR " SLUB: Unable to add boot slab alias "
" %s to sysfs \n " , s - > name ) ;
2007-05-07 01:49:36 +04:00
kfree ( al ) ;
}
resiliency_test ( ) ;
return 0 ;
}
__initcall ( slab_sysfs_init ) ;
# endif
2008-01-01 19:23:28 +03:00
/*
* The / proc / slabinfo ABI
*/
2008-01-03 00:04:48 +03:00
# ifdef CONFIG_SLABINFO
2008-01-01 19:23:28 +03:00
static void print_slabinfo_header ( struct seq_file * m )
{
seq_puts ( m , " slabinfo - version: 2.1 \n " ) ;
seq_puts ( m , " # name <active_objs> <num_objs> <objsize> "
" <objperslab> <pagesperslab> " ) ;
seq_puts ( m , " : tunables <limit> <batchcount> <sharedfactor> " ) ;
seq_puts ( m , " : slabdata <active_slabs> <num_slabs> <sharedavail> " ) ;
seq_putc ( m , ' \n ' ) ;
}
static void * s_start ( struct seq_file * m , loff_t * pos )
{
loff_t n = * pos ;
down_read ( & slub_lock ) ;
if ( ! n )
print_slabinfo_header ( m ) ;
return seq_list_start ( & slab_caches , * pos ) ;
}
static void * s_next ( struct seq_file * m , void * p , loff_t * pos )
{
return seq_list_next ( p , & slab_caches , pos ) ;
}
static void s_stop ( struct seq_file * m , void * p )
{
up_read ( & slub_lock ) ;
}
static int s_show ( struct seq_file * m , void * p )
{
unsigned long nr_partials = 0 ;
unsigned long nr_slabs = 0 ;
unsigned long nr_inuse = 0 ;
2008-04-14 20:11:40 +04:00
unsigned long nr_objs = 0 ;
unsigned long nr_free = 0 ;
2008-01-01 19:23:28 +03:00
struct kmem_cache * s ;
int node ;
s = list_entry ( p , struct kmem_cache , list ) ;
for_each_online_node ( node ) {
struct kmem_cache_node * n = get_node ( s , node ) ;
if ( ! n )
continue ;
nr_partials + = n - > nr_partial ;
nr_slabs + = atomic_long_read ( & n - > nr_slabs ) ;
2008-04-14 20:11:40 +04:00
nr_objs + = atomic_long_read ( & n - > total_objects ) ;
nr_free + = count_partial ( n , count_free ) ;
2008-01-01 19:23:28 +03:00
}
2008-04-14 20:11:40 +04:00
nr_inuse = nr_objs - nr_free ;
2008-01-01 19:23:28 +03:00
seq_printf ( m , " %-17s %6lu %6lu %6u %4u %4d " , s - > name , nr_inuse ,
2008-04-14 20:11:31 +04:00
nr_objs , s - > size , oo_objects ( s - > oo ) ,
( 1 < < oo_order ( s - > oo ) ) ) ;
2008-01-01 19:23:28 +03:00
seq_printf ( m , " : tunables %4u %4u %4u " , 0 , 0 , 0 ) ;
seq_printf ( m , " : slabdata %6lu %6lu %6lu " , nr_slabs , nr_slabs ,
0UL ) ;
seq_putc ( m , ' \n ' ) ;
return 0 ;
}
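/*
 * An illustrative /proc/slabinfo line produced by s_show() (values are
 * made up; SLUB has no tunables or shared caches, hence the zeros):
 *
 * kmalloc-64         1536   2048     64   64    1 : tunables    0    0    0 : slabdata     32     32      0
 */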
2008-10-06 02:42:17 +04:00
static const struct seq_operations slabinfo_op = {
2008-01-01 19:23:28 +03:00
. start = s_start ,
. next = s_next ,
. stop = s_stop ,
. show = s_show ,
} ;
2008-10-06 02:42:17 +04:00
static int slabinfo_open ( struct inode * inode , struct file * file )
{
return seq_open ( file , & slabinfo_op ) ;
}
static const struct file_operations proc_slabinfo_operations = {
. open = slabinfo_open ,
. read = seq_read ,
. llseek = seq_lseek ,
. release = seq_release ,
} ;
static int __init slab_proc_init ( void )
{
proc_create ( " slabinfo " , S_IWUSR | S_IRUGO , NULL , & proc_slabinfo_operations ) ;
return 0 ;
}
module_init ( slab_proc_init ) ;
2008-01-03 00:04:48 +03:00
# endif /* CONFIG_SLABINFO */