// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
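
/* Allocate a vm area of @size and map @pages into it, or return NULL. */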
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}
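
/*
 * Usage sketch (hypothetical caller, not part of this file): remap a
 * scattered page array uncached and tear the mapping down again.  The
 * names "my_pages" and "nr" and the pgprot choice are illustrative
 * assumptions only.
 *
 *	void *vaddr;
 *
 *	vaddr = dma_common_pages_remap(my_pages, nr << PAGE_SHIFT,
 *				       VM_USERMAP,
 *				       pgprot_noncached(PAGE_KERNEL),
 *				       __builtin_return_address(0));
 *	if (vaddr) {
 *		... use vaddr ...
 *		dma_common_free_remap(vaddr, nr << PAGE_SHIFT, VM_USERMAP);
 *	}
 */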

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}
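
/*
 * Usage sketch (hypothetical): give a physically contiguous allocation a
 * write-combined kernel alias.  "page" would typically come from
 * alloc_pages() or CMA; the pgprot choice is an illustrative assumption.
 *
 *	void *vaddr = dma_common_contiguous_remap(page, size, VM_USERMAP,
 *				pgprot_writecombine(PAGE_KERNEL),
 *				__builtin_return_address(0));
 */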

/*
 * Unmaps a range previously mapped by dma_common_*_remap.
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}

#ifdef CONFIG_DMA_DIRECT_REMAP
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
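
/*
 * Example: booting with "coherent_pool=2M" resizes the pool; memparse()
 * accepts the usual K/M/G suffixes.
 */

/*
 * Set up the atomic pool at boot: grab a physically contiguous chunk
 * (from CMA when available), flush the kernel alias, remap it with the
 * given protection and hand it to a genpool for later atomic allocations.
 */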
int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
{
	unsigned int pool_size_order = get_order(atomic_pool_size);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	int ret;

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(gfp, pool_size_order);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, atomic_pool_size);

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto free_page;

	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
					   prot, __builtin_return_address(0));
	if (!addr)
		goto destroy_genpool;

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		goto remove_mapping;
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
		atomic_pool_size / 1024);
	return 0;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
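
/*
 * Init sketch (assumption: arm64-style wiring; each architecture picks its
 * own gfp mask and page protection, pgprot_noncached() here is only
 * illustrative):
 *
 *	static int __init my_dma_init(void)
 *	{
 *		return dma_atomic_pool_init(GFP_DMA32,
 *					    pgprot_noncached(PAGE_KERNEL));
 *	}
 *	arch_initcall(my_dma_init);
 */

/* Report whether the whole of @start..@start+@size sits in the atomic pool. */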
bool dma_in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

bool dma_free_from_pool(void *start, size_t size)
{
	if (!dma_in_atomic_pool(start, size))
		return false;
	gen_pool_free(atomic_pool, (unsigned long)start, size);
	return true;
}
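
/*
 * Allocate DMA-coherent memory.  Non-blocking callers are served from the
 * atomic pool; with DMA_ATTR_NO_KERNEL_MAPPING the struct page is returned
 * as an opaque cookie; otherwise the pages get a fresh non-cacheable
 * kernel mapping.
 */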
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
{
	struct page *page = NULL;
	void *ret;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags) &&
	    !(attrs & DMA_ATTR_NO_KERNEL_MAPPING)) {
		ret = dma_alloc_from_pool(size, &page, flags);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	arch_dma_prep_coherent(page, size);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		ret = page; /* opaque cookie */
		goto done;
	}

	/* create a coherent mapping */
	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
	if (!ret) {
		__dma_direct_free_pages(dev, size, page);
		return ret;
	}

	memset(ret, 0, size);
done:
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}
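
/*
 * Free memory obtained from arch_dma_alloc(), undoing whichever of the
 * three allocation paths above was taken.
 */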
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		/* vaddr is a struct page cookie, not a kernel address */
		__dma_direct_free_pages(dev, size, vaddr);
	} else if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
		phys_addr_t phys = dma_to_phys(dev, dma_handle);
		struct page *page = pfn_to_page(__phys_to_pfn(phys));

		vunmap(vaddr);
		__dma_direct_free_pages(dev, size, page);
	}
}
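
/*
 * The remapped kernel address cannot be fed to virt_to_page(), so the
 * generic mmap/get_sgtable helpers derive the pfn from the DMA address
 * instead.
 */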
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
#endif /* CONFIG_DMA_DIRECT_REMAP */