/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the License.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int	order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	/* convert a page-order alignment into bitmap-bit units */
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
						unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}
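
/*
 * Sizing example (illustrative): with 4 KiB pages and pageblock_order = 10,
 * pageblock_nr_pages is 1024, so a 64 MiB reservation (16384 pages) is
 * activated as i = 16 pageblocks, each handed to
 * init_cma_reserved_pageblock() once all of its pfns are verified to lie in
 * a single zone.
 */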

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start = __pa(high_memory);
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		__func__, (unsigned long)size, (unsigned long)base,
		(unsigned long)limit, (unsigned long)alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the buddy allocator; in that case
	 * the area would no longer yield contiguous memory, which is not
	 * what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * Adjust limit to avoid crossing the low/high memory boundary for
	 * automatically allocated regions.
	 */
	if (((limit == 0 || limit > memblock_end) &&
	     (memblock_end - size < highmem_start &&
	      memblock_end > highmem_start)) ||
	    (!fixed && limit > highmem_start && limit - size < highmem_start)) {
		limit = highmem_start;
	}

	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
			(unsigned long)base, (unsigned long)highmem_start);
		goto err;
	}

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
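
/*
 * Usage sketch (not part of this file): architecture early-boot code would
 * typically declare a region along the lines below.  The function name
 * foo_arch_reserve_cma, the variable dma_contiguous_area and the 64 MiB
 * size are hypothetical placeholders chosen only for illustration.
 *
 *	static struct cma *dma_contiguous_area;
 *
 *	void __init foo_arch_reserve_cma(void)
 *	{
 *		int err = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *						 &dma_contiguous_area);
 *		if (err)
 *			pr_warn("CMA reservation failed: %d\n", err);
 *	}
 *
 * This must run while memblock is still the active allocator; the area only
 * becomes usable by cma_alloc() after cma_init_reserved_areas() has run.
 */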

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates @count pages from the given contiguous memory
 * area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}
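
/*
 * Usage sketch (illustrative only): a driver holding a struct cma pointer
 * returned through cma_declare_contiguous() could allocate and free a
 * 16-page buffer like this.  The names my_cma, nr_pages and use_the_buffer
 * are hypothetical.
 *
 *	int nr_pages = 16;
 *	struct page *page = cma_alloc(my_cma, nr_pages, 0);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	use_the_buffer(page_address(page));
 *	if (!cma_release(my_cma, page, nr_pages))
 *		pr_warn("pages did not come from this CMA area\n");
 */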