/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_locked;

	s = __kmem_cache_create(name, size, align, flags, ctor);
	if (!s)
		err = -ENOSYS; /* Until __kmem_cache_create returns code */

	/*
	 * Check if the slab has actually been created and if it was a
	 * real instantiation. Aliases do not belong on the list.
	 */
	if (s && s->refcount == 1)
		list_add(&s->list, &slab_caches);

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
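
/*
 * Illustrative usage sketch (not part of this file): a caller typically
 * creates a cache once at init time and then allocates objects from it.
 * The struct foo / foo_cache names below are hypothetical and only stand
 * in for a real subsystem's object type.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *					      0, SLAB_HWCACHE_ALIGN, NULL);
 *		if (!foo_cache)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 */
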
void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			__kmem_cache_destroy(s);
		} else {
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
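
/*
 * Illustrative sketch (not part of this file): the cache created in the
 * hypothetical foo_init() example above would be torn down on module
 * unload, after all of its objects have been freed, so that the refcount
 * drop here actually shuts the cache down.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cache);
 *	}
 */
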
int slab_is_available(void)
{
	return slab_state >= UP;
}