/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find a PFN aligned to the specified order and return an offset
 * represented in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}
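
/*
 * Worked example (illustrative, not from the original source): with
 * order_per_bit == 0, an allocation aligned to order 4 (16 pages) gives
 * cma_bitmap_aligned_mask() == 0xf. If base_pfn were 0x2f05, the first
 * 16-page aligned PFN in the area is 0x2f10, so cma_bitmap_aligned_offset()
 * returns 11; the offset shifts the bitmap search so that a bit found there
 * still maps back to a PFN with the requested alignment.
 */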

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
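
/*
 * Illustrative sketch, not part of the original file: how early boot code
 * that has already reserved a region in memblock could hand it over to CMA.
 * The identifiers example_rmem_cma and example_adopt_reserved() are
 * hypothetical and are not referenced anywhere else.
 */
static struct cma *example_rmem_cma __maybe_unused;

static int __init __maybe_unused example_adopt_reserved(phys_addr_t base,
							 phys_addr_t size)
{
	/* base and size must already be memblock-reserved and CMA-aligned. */
	return cma_init_reserved_mem(base, size, 0 /* order_per_bit */,
				     &example_rmem_cma);
}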

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;

	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy algorithm;
	 * in that case a contiguous allocation could not be satisfied, which
	 * is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
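	/*
	 * Worked example (illustrative): with 4 KiB pages, MAX_ORDER == 11 and
	 * pageblock_order == 9, this evaluates to max(alignment, 4096 << 10),
	 * i.e. the area is aligned to at least 4 MiB so that it covers whole
	 * pageblocks and whole MAX_ORDER - 1 buddy blocks.
	 */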
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address is not mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
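
/*
 * Illustrative sketch, not part of the original file: how architecture setup
 * code might reserve a CMA area during early boot. The identifiers
 * example_cma and example_reserve_cma() are hypothetical.
 */
static struct cma *example_cma __maybe_unused;

static void __init __maybe_unused example_reserve_cma(void)
{
	/*
	 * Reserve 16 MiB anywhere in memory (base 0, limit 0), with the
	 * default alignment, one page per bitmap bit and non-fixed placement.
	 */
	if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &example_cma))
		pr_warn("example: CMA reservation failed\n");
}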

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
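
/*
 * Illustrative sketch, not part of the original file: allocating a 1 MiB
 * physically contiguous buffer from an area declared earlier (for instance
 * by the example_reserve_cma() sketch above). example_alloc_1m() is a
 * hypothetical name.
 */
static __maybe_unused struct page *example_alloc_1m(struct cma *area)
{
	/* count is in pages; align the allocation to its own size. */
	return cma_alloc(area, SZ_1M >> PAGE_SHIFT, get_order(SZ_1M));
}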

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
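
/*
 * Illustrative sketch, not part of the original file: the usual lifetime of a
 * CMA buffer, pairing cma_alloc() with cma_release(). example_use_cma() is a
 * hypothetical name.
 */
static __maybe_unused void example_use_cma(struct cma *area, size_t count)
{
	struct page *pages = cma_alloc(area, count, 0);

	if (!pages)
		return;

	/* ... use the physically contiguous range starting at "pages" ... */

	if (!cma_release(area, pages, count))
		pr_err("example: pages did not belong to the CMA area\n");
}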