/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <linux/log2.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
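
/*
 * Illustrative examples only: the parser above accepts
 * "cma=size[@base[-limit]]" on the kernel command line. The addresses and
 * sizes below are hypothetical and depend on the platform's memory map:
 *
 *	cma=64M				- 64 MiB, placed anywhere
 *	cma=64M@0x80000000		- 64 MiB at a fixed base address
 *	cma=64M@0x80000000-0xa0000000	- 64 MiB somewhere in [base, limit)
 */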

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
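
/*
 * Illustrative sketch only: architecture setup code is expected to call
 * dma_contiguous_reserve() once, after memblock has been initialised and all
 * other early reservations have been made. The function name and limit below
 * are hypothetical placeholders for such arch code:
 *
 *	void __init foo_arch_reserve_memory(void)
 *	{
 *		...
 *		dma_contiguous_reserve(foo_dma_zone_limit);	// 0 == no limit
 *	}
 */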

static DEFINE_MUTEX(cma_mutex);

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

static int __init __dma_contiguous_reserve_area(phys_addr_t size,
				phys_addr_t base, phys_addr_t limit,
				phys_addr_t alignment,
				struct cma **res_cma, bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		 __func__, (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit, (unsigned long)alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could otherwise be merged into
	 * adjacent unmovable-migratetype pageblocks by the page allocator's
	 * buddy algorithm, and a later request for contiguous memory could
	 * then fail, which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
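
	/*
	 * Worked example (values are configuration-dependent assumptions, not
	 * guarantees): with 4 KiB pages, MAX_ORDER == 11 and
	 * pageblock_order == 10, the minimum alignment computed above is
	 * 4 KiB << 10 == 4 MiB, so base, size and limit are all rounded to
	 * 4 MiB boundaries.
	 */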

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	return 0;

err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve anywhere in the range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
					    res_cma, fixed);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
				   (*res_cma)->count << PAGE_SHIFT);

	return 0;
}
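
/*
 * Illustrative sketch only: board or arch code can carve out a private area
 * for one device and attach it with the architecture's dev_set_cma_area()
 * helper, where available. Names, sizes and the device pointer below are
 * hypothetical:
 *
 *	struct cma *foo_camera_cma;
 *
 *	dma_contiguous_reserve_area(SZ_16M, 0, 0, &foo_camera_cma, false);
 *	dev_set_cma_area(foo_camera_dev, foo_camera_cma);
 */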

static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}

static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
						unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	struct cma *cma = dev_get_cma_area(dev);

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return __dma_alloc_from_contiguous(cma, count, align);
}
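
/*
 * Illustrative sketch only: a driver could back a 1 MiB DMA buffer with CMA
 * pages roughly as follows (error handling trimmed, sizes hypothetical):
 *
 *	int order = get_order(SZ_1M);
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, 1 << order, order);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	dma_release_from_contiguous(dev, page, 1 << order);
 *
 * Most drivers reach this path indirectly, through dma_alloc_coherent() on
 * architectures whose DMA mapping ops are backed by CMA.
 */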

static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
					  int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);

	return __dma_release_from_contiguous(cma, pages, count);
}
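
/*
 * Illustrative sketch only: because the return value reports whether the
 * pages actually came from a CMA area, callers that mix allocators can use
 * it as a fallback test (hypothetical helper):
 *
 *	static void foo_free_dma_pages(struct device *dev, struct page *page,
 *				       size_t size)
 *	{
 *		if (!dma_release_from_contiguous(dev, page,
 *						 size >> PAGE_SHIFT))
 *			__free_pages(page, get_order(size));
 *	}
 */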