// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */
# include <linux/slab.h>
# include <linux/mm.h>
# include <linux/dma-mapping.h>
2013-10-23 07:47:43 +04:00
# include <linux/genalloc.h>
2019-11-05 11:01:36 +03:00
# include <linux/vmalloc.h>
2018-08-08 18:01:00 +03:00
# ifdef CONFIG_X86
# include <asm/set_memory.h>
# endif
2005-04-17 02:20:36 +04:00
# include <sound/memalloc.h>
/*
 *
 *  Bus-specific memory allocators
 *
 */
2007-07-26 20:59:36 +04:00
# ifdef CONFIG_HAS_DMA
2005-04-17 02:20:36 +04:00
/* allocate the coherent DMA pages */
2018-08-10 15:43:37 +03:00
static void snd_malloc_dev_pages ( struct snd_dma_buffer * dmab , size_t size )
2005-04-17 02:20:36 +04:00
{
2005-10-21 11:22:18 +04:00
gfp_t gfp_flags ;
2005-04-17 02:20:36 +04:00
gfp_flags = GFP_KERNEL
2005-11-22 08:32:22 +03:00
| __GFP_COMP /* compound page lets parts be mapped */
2005-04-17 02:20:36 +04:00
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN ; /* no stack trace print - this call is non-critical */
2018-08-10 15:43:37 +03:00
dmab - > area = dma_alloc_coherent ( dmab - > dev . dev , size , & dmab - > addr ,
gfp_flags ) ;
2018-08-08 18:01:00 +03:00
# ifdef CONFIG_X86
if ( dmab - > area & & dmab - > dev . type = = SNDRV_DMA_TYPE_DEV_UC )
set_memory_wc ( ( unsigned long ) dmab - > area ,
PAGE_ALIGN ( size ) > > PAGE_SHIFT ) ;
# endif
2005-04-17 02:20:36 +04:00
}
/* free the coherent DMA pages */
2018-08-10 15:43:37 +03:00
static void snd_free_dev_pages ( struct snd_dma_buffer * dmab )
2005-04-17 02:20:36 +04:00
{
2018-08-08 18:01:00 +03:00
# ifdef CONFIG_X86
if ( dmab - > dev . type = = SNDRV_DMA_TYPE_DEV_UC )
set_memory_wb ( ( unsigned long ) dmab - > area ,
PAGE_ALIGN ( dmab - > bytes ) > > PAGE_SHIFT ) ;
# endif
2018-08-10 15:43:37 +03:00
dma_free_coherent ( dmab - > dev . dev , dmab - > bytes , dmab - > area , dmab - > addr ) ;
2005-04-17 02:20:36 +04:00
}
2013-10-23 07:47:43 +04:00
2013-10-28 19:08:27 +04:00
# ifdef CONFIG_GENERIC_ALLOCATOR
2013-10-23 07:47:43 +04:00
/**
* snd_malloc_dev_iram - allocate memory from on - chip internal ram
* @ dmab : buffer allocation record to store the allocated data
* @ size : number of bytes to allocate from the iram
*
* This function requires iram phandle provided via of_node
*/
2013-10-29 14:56:21 +04:00
static void snd_malloc_dev_iram ( struct snd_dma_buffer * dmab , size_t size )
2013-10-23 07:47:43 +04:00
{
struct device * dev = dmab - > dev . dev ;
struct gen_pool * pool = NULL ;
2013-10-29 14:59:31 +04:00
dmab - > area = NULL ;
dmab - > addr = 0 ;
2013-10-23 07:47:43 +04:00
if ( dev - > of_node )
2015-07-01 01:00:07 +03:00
pool = of_gen_pool_get ( dev - > of_node , " iram " , 0 ) ;
2013-10-23 07:47:43 +04:00
if ( ! pool )
return ;
/* Assign the pool into private_data field */
dmab - > private_data = pool ;
2013-11-15 02:32:15 +04:00
dmab - > area = gen_pool_dma_alloc ( pool , size , & dmab - > addr ) ;
2013-10-23 07:47:43 +04:00
}
/**
* snd_free_dev_iram - free allocated specific memory from on - chip internal ram
* @ dmab : buffer allocation record to store the allocated data
*/
2013-10-29 14:56:21 +04:00
static void snd_free_dev_iram ( struct snd_dma_buffer * dmab )
2013-10-23 07:47:43 +04:00
{
struct gen_pool * pool = dmab - > private_data ;
if ( pool & & dmab - > area )
gen_pool_free ( pool , ( unsigned long ) dmab - > area , dmab - > bytes ) ;
}
2013-10-28 19:08:27 +04:00
# endif /* CONFIG_GENERIC_ALLOCATOR */
2007-07-26 20:59:36 +04:00
# endif /* CONFIG_HAS_DMA */
2005-04-17 02:20:36 +04:00
/*
*
* ALSA generic memory management
*
*/
2019-11-05 11:01:36 +03:00
/*
 * For the CONTINUOUS/VMALLOC buffer types the "device" pointer is
 * (ab)used to carry a gfp mask instead of a real device; a NULL dev
 * selects the caller-supplied default mask.
 */
static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
					  gfp_t default_gfp)
{
	return dev ? (__force gfp_t)(unsigned long)dev : default_gfp;
}
2005-04-17 02:20:36 +04:00
/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	gfp_t gfp;

	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
		dmab->area = alloc_pages_exact(size, gfp);
		dmab->addr = 0;
		break;
	case SNDRV_DMA_TYPE_VMALLOC:
		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
		dmab->area = __vmalloc(size, gfp, PAGE_KERNEL);
		dmab->addr = 0;
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_malloc_dev_iram(dmab, size);
		if (dmab->area)
			break;
		/* Internal memory might have limited size and no enough space,
		 * so if we fail to malloc, try to fetch memory traditionally.
		 */
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
#endif /* CONFIG_GENERIC_ALLOCATOR */
		/* fall through */
	case SNDRV_DMA_TYPE_DEV:
	case SNDRV_DMA_TYPE_DEV_UC:
		snd_malloc_dev_pages(dmab, size);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
	case SNDRV_DMA_TYPE_DEV_UC_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);
2005-04-17 02:20:36 +04:00
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	for (;;) {
		err = snd_dma_alloc_pages(type, device, size, dmab);
		if (!err)
			break;
		/* only retry on out-of-memory; other errors are final */
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		/* halve the request, then round up to a whole page order */
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
2005-04-17 02:20:36 +04:00
/**
* snd_dma_free_pages - release the allocated buffer
* @ dmab : the buffer allocation record to release
*
* Releases the allocated buffer via snd_dma_alloc_pages ( ) .
*/
void snd_dma_free_pages ( struct snd_dma_buffer * dmab )
{
switch ( dmab - > dev . type ) {
case SNDRV_DMA_TYPE_CONTINUOUS :
2018-11-23 21:38:13 +03:00
free_pages_exact ( dmab - > area , dmab - > bytes ) ;
2005-04-17 02:20:36 +04:00
break ;
2019-11-05 11:01:36 +03:00
case SNDRV_DMA_TYPE_VMALLOC :
vfree ( dmab - > area ) ;
break ;
2007-07-26 20:59:36 +04:00
# ifdef CONFIG_HAS_DMA
2013-10-24 16:25:32 +04:00
# ifdef CONFIG_GENERIC_ALLOCATOR
2013-10-23 07:47:43 +04:00
case SNDRV_DMA_TYPE_DEV_IRAM :
snd_free_dev_iram ( dmab ) ;
break ;
2013-10-24 16:25:32 +04:00
# endif /* CONFIG_GENERIC_ALLOCATOR */
2005-04-17 02:20:36 +04:00
case SNDRV_DMA_TYPE_DEV :
2018-08-08 18:01:00 +03:00
case SNDRV_DMA_TYPE_DEV_UC :
2018-08-10 15:43:37 +03:00
snd_free_dev_pages ( dmab ) ;
2005-04-17 02:20:36 +04:00
break ;
2008-06-17 18:39:06 +04:00
# endif
# ifdef CONFIG_SND_DMA_SGBUF
2005-04-17 02:20:36 +04:00
case SNDRV_DMA_TYPE_DEV_SG :
2018-08-08 18:01:00 +03:00
case SNDRV_DMA_TYPE_DEV_UC_SG :
2005-04-17 02:20:36 +04:00
snd_free_sgbuf_pages ( dmab ) ;
break ;
2007-07-26 20:59:36 +04:00
# endif
2005-04-17 02:20:36 +04:00
default :
2014-02-04 21:21:03 +04:00
pr_err ( " snd-malloc: invalid device type %d \n " , dmab - > dev . type ) ;
2005-04-17 02:20:36 +04:00
}
}
EXPORT_SYMBOL ( snd_dma_free_pages ) ;