/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */
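
/*
 * Each registered CMA area gets a directory under <debugfs>/cma/cma-<idx>
 * with read-only attributes (base_pfn, count, order_per_bit, used, maxchunk,
 * bitmap) plus two write-only files, "alloc" and "free", that exercise
 * cma_alloc()/cma_release() from userspace.
 *
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 4 > /sys/kernel/debug/cma/cma-0/alloc	# allocate 4 pages
 *	cat /sys/kernel/debug/cma/cma-0/used		# pages currently allocated
 *	echo 4 > /sys/kernel/debug/cma/cma-0/free	# release up to 4 pages
 */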
#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"
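
/*
 * One allocation made through the "alloc" file, kept on the per-area
 * mem_head list so it can later be returned through the "free" file.
 */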
struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

static struct dentry *cma_debugfs_root;

static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
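
/*
 * "used": number of pages currently allocated from this area, i.e. the set
 * bits in the allocation bitmap scaled by order_per_bit.
 */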
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	mutex_lock(&cma->lock);
	/* pages counter is smaller than sizeof(int) */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
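
/*
 * "maxchunk": size, in pages, of the largest contiguous free range in the
 * area, found as the longest run of clear bits in the allocation bitmap.
 */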
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= cma->count)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	mutex_unlock(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}
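
/*
 * Release up to @count pages of the allocations recorded on the mem_head
 * list; partial blocks can only be split up when order_per_bit is 0.
 */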
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
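
/*
 * Allocate @count pages from the area and record them on the mem_head list
 * so they can be released later through the "free" file.
 */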
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0, GFP_KERNEL);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
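
/* Create the cma-<idx> directory and its attribute files for one area. */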
static void cma_debugfs_add_one(struct cma *cma, int idx)
{
	struct dentry *tmp;
	char name[16];
	int u32s;

	sprintf(name, "cma-%d", idx);

	tmp = debugfs_create_dir(name, cma_debugfs_root);

	debugfs_create_file("alloc", S_IWUSR, tmp, cma, &cma_alloc_fops);
	debugfs_create_file("free", S_IWUSR, tmp, cma, &cma_free_fops);
	debugfs_create_file("base_pfn", S_IRUGO, tmp,
			    &cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", S_IRUGO, tmp,
			    &cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
			    &cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
}
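
/* Create the top-level "cma" directory and add one entry per registered area. */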
static int __init cma_debugfs_init(void)
{
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);
	if (!cma_debugfs_root)
		return -ENOMEM;

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], i);

	return 0;
}
late_initcall(cma_debugfs_init);