// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * Contiguous Memory Allocator
 *
 * The Contiguous Memory Allocator (CMA) makes it possible to
 * allocate big contiguous chunks of memory after the system has
 * booted.
 *
 * Why is it needed?
 *
 * Various devices on embedded systems have no scatter-gather and/or
 * IO map support and require contiguous blocks of memory to
 * operate. They include devices such as cameras, hardware video
 * coders, etc.
 *
 * Such devices often require big memory buffers (a full HD frame
 * is, for instance, more than 2 mega pixels large, i.e. more than 6
 * MB of memory), which makes mechanisms such as kmalloc() or
 * alloc_page() ineffective.
 *
 * At the same time, a solution where a big memory region is
 * reserved for a device is suboptimal since often more memory is
 * reserved than strictly required and, moreover, the memory is
 * inaccessible to the page allocator even if the device driver
 * doesn't use it.
 *
 * CMA tries to solve this issue by operating on memory regions
 * from which only movable pages can be allocated. This way, the
 * kernel can use the memory for pagecache and, when a device driver
 * requests it, the allocated pages can be migrated away.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#include <asm/page.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include <linux/nospec.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
			(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t  size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
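
/*
 * Example (illustrative): the parser above accepts
 * "cma=<size>[@<base>[-<limit>]]".  "cma=64M" reserves 64 MiB anywhere,
 * while "cma=64M@0x20000000-0x24000000" asks for a fixed reservation in
 * that exact physical range (base + size == limit, so fixed placement is
 * used by dma_contiguous_reserve()).
 */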

#ifdef CONFIG_DMA_NUMA_CMA

static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
static phys_addr_t numa_cma_size[MAX_NUMNODES] __initdata;
static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_numa_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			numa_cma_size[nid] = tmp;

			if (*s == ',')
				s++;
			else
				break;
		} else {
			break;
		}
	}

	return 0;
}
early_param("numa_cma", early_numa_cma);
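
/*
 * Example (illustrative): "numa_cma=<node>:<size>[,<node>:<size>...]", e.g.
 * "numa_cma=0:64M,1:128M" asks for 64 MiB of CMA on node 0 and 128 MiB on
 * node 1; the areas are actually reserved in dma_numa_cma_reserve() below.
 */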

static int __init early_cma_pernuma(char *p)
{
	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
#endif

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
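
/*
 * Worked example (illustrative): with 2 GiB of memblock memory and
 * CONFIG_CMA_SIZE_PERCENTAGE=10, total_pages is 524288 (4 KiB pages), so
 * cma_early_percent_memory() returns 52428 pages << PAGE_SHIFT, i.e.
 * roughly 204 MiB.
 */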

#ifdef CONFIG_DMA_NUMA_CMA
static void __init dma_numa_cma_reserve(void)
{
	int nid;

	for_each_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma;

		if (!node_online(nid)) {
			if (pernuma_size_bytes || numa_cma_size[nid])
				pr_warn("invalid node %d specified\n", nid);
			continue;
		}

		if (pernuma_size_bytes) {
			cma = &dma_contiguous_pernuma_area[nid];
			snprintf(name, sizeof(name), "pernuma%d", nid);
			ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
							 0, false, name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d", __func__,
					ret, nid);
		}

		if (numa_cma_size[nid]) {
			cma = &dma_contiguous_numa_area[nid];
			snprintf(name, sizeof(name), "numa%d", nid);
			ret = cma_declare_contiguous_nid(0, numa_cma_size[nid], 0, 0, 0, false,
							 name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d", __func__,
					ret, nid);
		}
	}
}
#else
static inline void __init dma_numa_cma_reserve(void)
{
}
#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	dma_numa_cma_reserve();

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
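
/*
 * Example (illustrative, hypothetical arch hook names): architecture setup
 * code calls this once memblock is up, typically passing its DMA zone limit:
 *
 *	void __init arch_mem_init(void)
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_zone_limit);
 *	}
 */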

void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes),
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
					"reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
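
/*
 * Example (illustrative, hypothetical names): platform code can carve out a
 * dedicated area for a specific device class instead of the global one:
 *
 *	static struct cma *my_video_cma;
 *
 *	dma_contiguous_reserve_area(SZ_32M, 0, 0, &my_video_cma, false);
 */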

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
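
/*
 * Example (illustrative only): a driver allocating and releasing a 1 MiB
 * buffer through the pair above:
 *
 *	size_t count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M), false);
 *	if (page) {
 *		...
 *		dma_release_from_contiguous(dev, page, count);
 *	}
 */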

static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
}

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:  Pointer to device for which the allocation is performed.
 * @size: Requested allocation size.
 * @gfp:  Allocation flags.
 *
 * Tries to use the device specific contiguous memory area if available, then
 * the per-NUMA CMA area; if those allocations fail, it falls back to the
 * default global area.
 *
 * Note that it bypasses one-page sized allocations from the per-NUMA and
 * global areas as the addresses within one page are always contiguous, so
 * there is no need to waste CMA pages for that kind; it also helps reduce
 * fragmentation.
*/
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_NUMA_CMA
	int nid = dev_to_node(dev);
#endif
	/* CMA can be used only in the context which permits sleeping */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	if (size <= PAGE_SIZE)
		return NULL;

#ifdef CONFIG_DMA_NUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}

		cma = dma_contiguous_numa_area[nid];
		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev:  Pointer to device for which the pages were allocated.
 * @page: Pointer to the allocated pages.
 * @size: Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). As
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area and true otherwise, this function then does a fallback
 * __free_pages() upon a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* if dev has its own cma, free page from there */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * otherwise, page is from either per-numa cma or default cma
		 */
#ifdef CONFIG_DMA_NUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
				page, count))
			return;
		if (cma_release(dma_contiguous_numa_area[page_to_nid(page)],
				page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* not in any cma, free from buddy */
	__free_pages(page, get_order(size));
}
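
/*
 * Example (illustrative only): a typical caller pattern pairing these
 * helpers with a normal page allocation fallback:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, size, gfp);
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 *
 * dma_free_contiguous() falls back to __free_pages() itself when the pages
 * did not come from a CMA area.
 */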

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev->cma_area = rmem->priv;
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->cma_area = NULL;
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_default_area = cma;

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
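
/*
 * Example (illustrative) device tree node that rmem_cma_setup() above would
 * match through the "shared-dma-pool" compatible:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */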
#endif