#include <stdio.h>	/* for the printf() calls below */
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;	/* outstanding allocations, so tests can check for leaks */
int preempt_count;
int kmalloc_verbose;	/* when set, log every allocation and free */
int test_verbose;

struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;		/* free list, chained through node->private_data */
	void (*ctor)(void *);
};

void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	/* Failing __GFP_NOWARN requests lets the tests exercise OOM paths. */
	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		/* Pop a cached object off the free list. */
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		/* Free list is empty; fall back to a fresh allocation. */
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", node);
	return node;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		/* Free list is full; poison the object and release it. */
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		/* Keep the object cached for the next kmem_cache_alloc(). */
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

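/*
 * Illustration only, not part of the test suite: a sketch of how the
 * per-cache free list above is threaded through node->private_data.
 * The helper name and the SLAB_SHIM_EXAMPLE guard are made up for this
 * sketch; nothing in the build defines them.
 */
#ifdef SLAB_SHIM_EXAMPLE
static int kmem_cache_count_free(struct kmem_cache *cachep)
{
	struct radix_tree_node *node;
	int count = 0;

	pthread_mutex_lock(&cachep->lock);
	for (node = cachep->objs; node; node = node->private_data)
		count++;
	assert(count == cachep->nr_objs);	/* list length matches the counter */
	pthread_mutex_unlock(&cachep->lock);
	return count;
}
#endif
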
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
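
/*
 * A minimal usage sketch of this slab shim, for illustration only; it is
 * not compiled into the tests.  SLAB_SHIM_EXAMPLE and the function name
 * below are hypothetical, and the sketch assumes nr_allocated starts at
 * zero.
 */
#ifdef SLAB_SHIM_EXAMPLE
static void slab_shim_example(void)
{
	struct kmem_cache *cachep = kmem_cache_create("demo",
			sizeof(struct radix_tree_node), 0, 0, NULL);
	struct radix_tree_node *node = kmem_cache_alloc(cachep, 0);

	/* __GFP_NOWARN makes the shim fail the allocation on purpose. */
	assert(kmem_cache_alloc(cachep, __GFP_NOWARN) == NULL);

	kmem_cache_free(cachep, node);
	assert(nr_allocated == 0);	/* everything handed out came back */
}
#endif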