// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
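
/*
 * One atomic pool is kept per GFP zone (DMA, DMA32 and normal kernel
 * memory); the pool_size_* counters track how much memory each pool
 * currently holds and are exported through debugfs.
 */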
static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;
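
/*
 * Passing "coherent_pool=<size>" on the kernel command line (for example
 * "coherent_pool=256K") overrides the default initial size of each pool.
 */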
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);
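
/* Expose the running size of each pool in debugfs under dma_pools/. */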
static void __init dma_atomic_pool_debugfs_init(void)
{
        struct dentry *root;

        root = debugfs_create_dir("dma_pools", NULL);
        if (IS_ERR_OR_NULL(root))
                return;

        debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
        debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
        debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}
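
/* Account freshly added memory to the counter matching the pool's zone. */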
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
        if (gfp & __GFP_DMA)
                pool_size_dma += size;
        else if (gfp & __GFP_DMA32)
                pool_size_dma32 += size;
        else
                pool_size_kernel += size;
}
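
/*
 * Grow @pool by roughly @pool_size bytes: allocate the largest page block
 * the page allocator will give us (falling back to smaller orders), prepare
 * it for coherent DMA and hand it to the genpool.
 */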
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
                              gfp_t gfp)
{
        unsigned int order;
        struct page *page;
        void *addr;
        int ret = -ENOMEM;

        /* Cannot allocate larger than MAX_ORDER-1 */
        order = min(get_order(pool_size), MAX_ORDER - 1);

        do {
                pool_size = 1 << (PAGE_SHIFT + order);
                page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
                goto out;

        arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
        addr = dma_common_contiguous_remap(page, pool_size,
                                           pgprot_dmacoherent(PAGE_KERNEL),
                                           __builtin_return_address(0));
        if (!addr)
                goto free_page;
#else
        addr = page_to_virt(page);
#endif
        /*
         * Memory in the atomic DMA pools must be unencrypted, the pools do not
         * shrink so no re-encryption occurs in dma_direct_free_pages().
         */
        ret = set_memory_decrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (ret)
                goto remove_mapping;
        ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
                                pool_size, NUMA_NO_NODE);
        if (ret)
                goto encrypt_mapping;

        dma_atomic_pool_size_add(gfp, pool_size);
        return 0;

encrypt_mapping:
        ret = set_memory_encrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (WARN_ON_ONCE(ret)) {
                /* Decrypt succeeded but encrypt failed, purposely leak */
                goto out;
        }
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
        dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
        __free_pages(page, order);
out:
        return ret;
}
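
/*
 * Top a pool back up from the workqueue whenever an allocation has left it
 * below atomic_pool_size.
 */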
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
        if (pool && gen_pool_avail(pool) < atomic_pool_size)
                atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                atomic_pool_resize(atomic_pool_dma,
                                   GFP_KERNEL | GFP_DMA);
        if (IS_ENABLED(CONFIG_ZONE_DMA32))
                atomic_pool_resize(atomic_pool_dma32,
                                   GFP_KERNEL | GFP_DMA32);
        atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}
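
/*
 * Create a page-granular genpool and seed it with an initial pool_size worth
 * of memory allocated with the given GFP flags.
 */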
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
                                                      gfp_t gfp)
{
        struct gen_pool *pool;
        int ret;

        pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
        if (!pool)
                return NULL;

        gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

        ret = atomic_pool_expand(pool, pool_size, gfp);
        if (ret) {
                gen_pool_destroy(pool);
                pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
                       pool_size >> 10, &gfp);
                return NULL;
        }

        pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
                gen_pool_size(pool) >> 10, &gfp);
        return pool;
}
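
/*
 * Set up every pool the configuration allows for at postcore_initcall time,
 * sized either from the coherent_pool= parameter or from the amount of RAM.
 */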
static int __init dma_atomic_pool_init(void)
{
        int ret = 0;

        /*
         * If coherent_pool was not used on the command line, default the pool
         * sizes to 128 KB per 1 GB of memory, min 128 KB, max MAX_ORDER-1.
         */
        if (!atomic_pool_size) {
                unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
                pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
                atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
        }
        INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

        atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
                                                    GFP_KERNEL);
        if (!atomic_pool_kernel)
                ret = -ENOMEM;
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
                atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA);
                if (!atomic_pool_dma)
                        ret = -ENOMEM;
        }
        if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
                atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA32);
                if (!atomic_pool_dma32)
                        ret = -ENOMEM;
        }

        dma_atomic_pool_debugfs_init();
        return ret;
}
postcore_initcall(dma_atomic_pool_init);
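
/*
 * Pick the pool matching the zone that a direct-mapping allocation for this
 * device's coherent DMA mask would come from.
 */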
static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
{
        u64 phys_mask;
        gfp_t gfp;

        gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                          &phys_mask);
        if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
                return atomic_pool_dma;
        if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
                return atomic_pool_dma32;
        return atomic_pool_kernel;
}
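
/*
 * If the guessed pool could not satisfy a request, fall back to the next
 * more restrictive zone: kernel -> DMA32 -> DMA, then give up.
 */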
static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
{
        if (bad_pool == atomic_pool_kernel)
                return atomic_pool_dma32 ?: atomic_pool_dma;
        if (bad_pool == atomic_pool_dma32)
                return atomic_pool_dma;
        return NULL;
}

static inline struct gen_pool *dma_guess_pool(struct device *dev,
                                              struct gen_pool *bad_pool)
{
        if (bad_pool)
                return dma_get_safer_pool(bad_pool);

        return dma_guess_pool_from_device(dev);
}
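
/*
 * Allocate from the first pool whose memory the device can actually address,
 * walking from the guessed pool towards safer ones, and kick the worker if
 * the chosen pool is running low.
 */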
void *dma_alloc_from_pool(struct device *dev, size_t size,
                          struct page **ret_page, gfp_t flags)
{
        struct gen_pool *pool = NULL;
        unsigned long val = 0;
        void *ptr = NULL;
        phys_addr_t phys;

        while (1) {
                pool = dma_guess_pool(dev, pool);
                if (!pool) {
                        WARN(1, "Failed to get suitable pool for %s\n",
                             dev_name(dev));
                        break;
                }

                val = gen_pool_alloc(pool, size);
                if (!val)
                        continue;

                phys = gen_pool_virt_to_phys(pool, val);
                if (dma_coherent_ok(dev, phys, size))
                        break;

                gen_pool_free(pool, val, size);
                val = 0;
        }

        if (val) {
                *ret_page = pfn_to_page(__phys_to_pfn(phys));
                ptr = (void *)val;
                memset(ptr, 0, size);

                if (gen_pool_avail(pool) < atomic_pool_size)
                        schedule_work(&atomic_pool_work);
        }

        return ptr;
}
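
/*
 * Return memory to whichever pool it came from; report failure if no pool
 * owns the address.
 */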
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
        struct gen_pool *pool = NULL;

        while (1) {
                pool = dma_guess_pool(dev, pool);
                if (!pool)
                        return false;

                if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
                        gen_pool_free(pool, (unsigned long)start, size);
                        return true;
                }
        }
}