2005-04-17 02:20:36 +04:00
/*
2007-10-15 11:50:19 +04:00
* Copyright ( c ) by Jaroslav Kysela < perex @ perex . cz >
2005-04-17 02:20:36 +04:00
* Takashi Iwai < tiwai @ suse . de >
*
* Generic memory allocators
*
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License , or
* ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc . , 59 Temple Place , Suite 330 , Boston , MA 02111 - 1307 USA
*
*/
# include <linux/module.h>
# include <linux/proc_fs.h>
# include <linux/init.h>
# include <linux/pci.h>
# include <linux/slab.h>
# include <linux/mm.h>
2007-09-17 23:55:10 +04:00
# include <linux/seq_file.h>
2005-05-30 20:27:03 +04:00
# include <asm/uaccess.h>
2005-04-17 02:20:36 +04:00
# include <linux/dma-mapping.h>
2013-10-23 07:47:43 +04:00
# include <linux/genalloc.h>
2005-04-17 02:20:36 +04:00
# include <linux/moduleparam.h>
2006-01-16 18:29:08 +03:00
# include <linux/mutex.h>
2005-04-17 02:20:36 +04:00
# include <sound/memalloc.h>
2007-10-15 11:50:19 +04:00
MODULE_AUTHOR ( " Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz> " ) ;
2005-04-17 02:20:36 +04:00
MODULE_DESCRIPTION ( " Memory allocator for ALSA system. " ) ;
MODULE_LICENSE ( " GPL " ) ;
/* protects the reserved-buffer list below */
static DEFINE_MUTEX(list_mutex);
/* list of preserved (reserved) buffers, entries are struct snd_mem_list */
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
	struct snd_dma_buffer buffer;	/* the preserved buffer itself */
	unsigned int id;		/* id used to match the buffer on re-use */
	struct list_head list;		/* link in mem_list_head */
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

/* account 2^order newly allocated pages in the global counter */
static inline void inc_snd_pages(int order)
{
	long npages = 1 << order;

	snd_allocated_pages += npages;
}

/* subtract 2^order released pages from the global counter */
static inline void dec_snd_pages(int order)
{
	long npages = 1 << order;

	snd_allocated_pages -= npages;
}
/**
* snd_malloc_pages - allocate pages with the given size
* @ size : the size to allocate in bytes
* @ gfp_flags : the allocation conditions , GFP_XXX
*
* Allocates the physically contiguous pages with the given size .
*
2013-03-12 01:05:14 +04:00
* Return : The pointer of the buffer , or % NULL if no enough memory .
2005-04-17 02:20:36 +04:00
*/
2005-10-21 11:22:18 +04:00
void * snd_malloc_pages ( size_t size , gfp_t gfp_flags )
2005-04-17 02:20:36 +04:00
{
int pg ;
void * res ;
2008-08-08 19:09:09 +04:00
if ( WARN_ON ( ! size ) )
return NULL ;
if ( WARN_ON ( ! gfp_flags ) )
return NULL ;
2005-11-22 08:32:22 +03:00
gfp_flags | = __GFP_COMP ; /* compound page lets parts be mapped */
2005-04-17 02:20:36 +04:00
pg = get_order ( size ) ;
2006-01-31 16:44:28 +03:00
if ( ( res = ( void * ) __get_free_pages ( gfp_flags , pg ) ) ! = NULL )
2005-04-17 02:20:36 +04:00
inc_snd_pages ( pg ) ;
return res ;
}
/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
	int order;

	if (!ptr)
		return;

	order = get_order(size);
	dec_snd_pages(order);
	free_pages((unsigned long)ptr, order);
}
/*
*
* Bus - specific memory allocators
*
*/
2007-07-26 20:59:36 +04:00
# ifdef CONFIG_HAS_DMA
2005-04-17 02:20:36 +04:00
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
	int order;
	void *ptr;
	gfp_t gfp;

	if (WARN_ON(!dma))
		return NULL;

	order = get_order(size);
	gfp = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	ptr = dma_alloc_coherent(dev, PAGE_SIZE << order, dma, gfp);
	if (ptr)
		inc_snd_pages(order);
	return ptr;
}
/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
			       dma_addr_t dma)
{
	int order;

	if (!ptr)
		return;

	order = get_order(size);
	dec_snd_pages(order);
	dma_free_coherent(dev, PAGE_SIZE << order, ptr, dma);
}
2013-10-23 07:47:43 +04:00
2013-10-28 19:08:27 +04:00
# ifdef CONFIG_GENERIC_ALLOCATOR
2013-10-23 07:47:43 +04:00
/**
* snd_malloc_dev_iram - allocate memory from on - chip internal ram
* @ dmab : buffer allocation record to store the allocated data
* @ size : number of bytes to allocate from the iram
*
* This function requires iram phandle provided via of_node
*/
2013-10-29 14:56:21 +04:00
static void snd_malloc_dev_iram ( struct snd_dma_buffer * dmab , size_t size )
2013-10-23 07:47:43 +04:00
{
struct device * dev = dmab - > dev . dev ;
struct gen_pool * pool = NULL ;
2013-10-29 14:59:31 +04:00
dmab - > area = NULL ;
dmab - > addr = 0 ;
2013-10-23 07:47:43 +04:00
if ( dev - > of_node )
pool = of_get_named_gen_pool ( dev - > of_node , " iram " , 0 ) ;
if ( ! pool )
return ;
/* Assign the pool into private_data field */
dmab - > private_data = pool ;
dmab - > area = ( void * ) gen_pool_alloc ( pool , size ) ;
if ( ! dmab - > area )
return ;
dmab - > addr = gen_pool_virt_to_phys ( pool , ( unsigned long ) dmab - > area ) ;
}
/**
* snd_free_dev_iram - free allocated specific memory from on - chip internal ram
* @ dmab : buffer allocation record to store the allocated data
*/
2013-10-29 14:56:21 +04:00
static void snd_free_dev_iram ( struct snd_dma_buffer * dmab )
2013-10-23 07:47:43 +04:00
{
struct gen_pool * pool = dmab - > private_data ;
if ( pool & & dmab - > area )
gen_pool_free ( pool , ( unsigned long ) dmab - > area , dmab - > bytes ) ;
}
2013-10-28 19:08:27 +04:00
# endif /* CONFIG_GENERIC_ALLOCATOR */
2007-07-26 20:59:36 +04:00
# endif /* CONFIG_HAS_DMA */
2005-04-17 02:20:36 +04:00
/*
*
* ALSA generic memory management
*
*/
/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		/* for the continuous type the "device" argument carries the
		 * gfp flags, not a real device pointer */
		dmab->area = snd_malloc_pages(size,
					(__force gfp_t)(unsigned long)device);
		dmab->addr = 0;
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_malloc_dev_iram(dmab, size);
		if (dmab->area)
			break;
		/* Internal memory might have limited size and no enough space,
		 * so if we fail to malloc, try to fetch memory traditionally.
		 */
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
		/* fall through */
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is available in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	for (;;) {
		int err = snd_dma_alloc_pages(type, device, size, dmab);
		size_t aligned;

		if (err >= 0)
			break;
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		/* first round up to the natural page-order size, then halve */
		aligned = PAGE_SIZE << get_order(size);
		size = (size != aligned) ? aligned : size >> 1;
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
/**
* snd_dma_free_pages - release the allocated buffer
* @ dmab : the buffer allocation record to release
*
* Releases the allocated buffer via snd_dma_alloc_pages ( ) .
*/
void snd_dma_free_pages ( struct snd_dma_buffer * dmab )
{
switch ( dmab - > dev . type ) {
case SNDRV_DMA_TYPE_CONTINUOUS :
snd_free_pages ( dmab - > area , dmab - > bytes ) ;
break ;
2007-07-26 20:59:36 +04:00
# ifdef CONFIG_HAS_DMA
2013-10-24 16:25:32 +04:00
# ifdef CONFIG_GENERIC_ALLOCATOR
2013-10-23 07:47:43 +04:00
case SNDRV_DMA_TYPE_DEV_IRAM :
snd_free_dev_iram ( dmab ) ;
break ;
2013-10-24 16:25:32 +04:00
# endif /* CONFIG_GENERIC_ALLOCATOR */
2005-04-17 02:20:36 +04:00
case SNDRV_DMA_TYPE_DEV :
snd_free_dev_pages ( dmab - > dev . dev , dmab - > bytes , dmab - > area , dmab - > addr ) ;
break ;
2008-06-17 18:39:06 +04:00
# endif
# ifdef CONFIG_SND_DMA_SGBUF
2005-04-17 02:20:36 +04:00
case SNDRV_DMA_TYPE_DEV_SG :
snd_free_sgbuf_pages ( dmab ) ;
break ;
2007-07-26 20:59:36 +04:00
# endif
2005-04-17 02:20:36 +04:00
default :
printk ( KERN_ERR " snd-malloc: invalid device type %d \n " , dmab - > dev . type ) ;
}
}
/**
* snd_dma_get_reserved - get the reserved buffer for the given device
* @ dmab : the buffer allocation record to store
* @ id : the buffer id
*
* Looks for the reserved - buffer list and re - uses if the same buffer
* is found in the list . When the buffer is found , it ' s removed from the free list .
*
2013-03-12 01:05:14 +04:00
* Return : The size of buffer if the buffer is found , or zero if not found .
2005-04-17 02:20:36 +04:00
*/
size_t snd_dma_get_reserved_buf ( struct snd_dma_buffer * dmab , unsigned int id )
{
struct snd_mem_list * mem ;
2008-08-08 19:09:09 +04:00
if ( WARN_ON ( ! dmab ) )
return 0 ;
2005-04-17 02:20:36 +04:00
2006-01-16 18:29:08 +03:00
mutex_lock ( & list_mutex ) ;
2006-10-05 18:02:22 +04:00
list_for_each_entry ( mem , & mem_list_head , list ) {
2005-04-17 02:20:36 +04:00
if ( mem - > id = = id & &
2005-05-30 20:27:03 +04:00
( mem - > buffer . dev . dev = = NULL | | dmab - > dev . dev = = NULL | |
! memcmp ( & mem - > buffer . dev , & dmab - > dev , sizeof ( dmab - > dev ) ) ) ) {
struct device * dev = dmab - > dev . dev ;
2006-10-05 18:02:22 +04:00
list_del ( & mem - > list ) ;
2005-04-17 02:20:36 +04:00
* dmab = mem - > buffer ;
2005-05-30 20:27:03 +04:00
if ( dmab - > dev . dev = = NULL )
dmab - > dev . dev = dev ;
2005-04-17 02:20:36 +04:00
kfree ( mem ) ;
2006-01-16 18:29:08 +03:00
mutex_unlock ( & list_mutex ) ;
2005-04-17 02:20:36 +04:00
return dmab - > bytes ;
}
}
2006-01-16 18:29:08 +03:00
mutex_unlock ( & list_mutex ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
/**
* snd_dma_reserve_buf - reserve the buffer
* @ dmab : the buffer to reserve
* @ id : the buffer id
*
* Reserves the given buffer as a reserved buffer .
2013-03-12 01:05:14 +04:00
*
* Return : Zero if successful , or a negative code on error .
2005-04-17 02:20:36 +04:00
*/
int snd_dma_reserve_buf ( struct snd_dma_buffer * dmab , unsigned int id )
{
struct snd_mem_list * mem ;
2008-08-08 19:09:09 +04:00
if ( WARN_ON ( ! dmab ) )
return - EINVAL ;
2005-04-17 02:20:36 +04:00
mem = kmalloc ( sizeof ( * mem ) , GFP_KERNEL ) ;
if ( ! mem )
return - ENOMEM ;
2006-01-16 18:29:08 +03:00
mutex_lock ( & list_mutex ) ;
2005-04-17 02:20:36 +04:00
mem - > buffer = * dmab ;
mem - > id = id ;
list_add_tail ( & mem - > list , & mem_list_head ) ;
2006-01-16 18:29:08 +03:00
mutex_unlock ( & list_mutex ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
/*
* purge all reserved buffers
*/
static void free_all_reserved_pages ( void )
{
struct list_head * p ;
struct snd_mem_list * mem ;
2006-01-16 18:29:08 +03:00
mutex_lock ( & list_mutex ) ;
2005-04-17 02:20:36 +04:00
while ( ! list_empty ( & mem_list_head ) ) {
p = mem_list_head . next ;
mem = list_entry ( p , struct snd_mem_list , list ) ;
list_del ( p ) ;
snd_dma_free_pages ( & mem - > buffer ) ;
kfree ( mem ) ;
}
2006-01-16 18:29:08 +03:00
mutex_unlock ( & list_mutex ) ;
2005-04-17 02:20:36 +04:00
}
# ifdef CONFIG_PROC_FS
/*
* proc file interface
*/
2005-05-30 20:27:03 +04:00
# define SND_MEM_PROC_FILE "driver / snd-page-alloc"
2005-08-11 17:59:17 +04:00
static struct proc_dir_entry * snd_mem_proc ;
2005-05-30 20:27:03 +04:00
2007-09-17 23:55:10 +04:00
static int snd_mem_proc_read ( struct seq_file * seq , void * offset )
2005-04-17 02:20:36 +04:00
{
long pages = snd_allocated_pages > > ( PAGE_SHIFT - 12 ) ;
struct snd_mem_list * mem ;
int devno ;
2008-08-27 11:33:26 +04:00
static char * types [ ] = { " UNKNOWN " , " CONT " , " DEV " , " DEV-SG " } ;
2005-04-17 02:20:36 +04:00
2006-01-16 18:29:08 +03:00
mutex_lock ( & list_mutex ) ;
2007-09-17 23:55:10 +04:00
seq_printf ( seq , " pages : %li bytes (%li pages per %likB) \n " ,
pages * PAGE_SIZE , pages , PAGE_SIZE / 1024 ) ;
2005-04-17 02:20:36 +04:00
devno = 0 ;
2006-10-05 18:02:22 +04:00
list_for_each_entry ( mem , & mem_list_head , list ) {
2005-04-17 02:20:36 +04:00
devno + + ;
2007-09-17 23:55:10 +04:00
seq_printf ( seq , " buffer %d : ID %08x : type %s \n " ,
devno , mem - > id , types [ mem - > buffer . dev . type ] ) ;
seq_printf ( seq , " addr = 0x%lx, size = %d bytes \n " ,
( unsigned long ) mem - > buffer . addr ,
( int ) mem - > buffer . bytes ) ;
2005-04-17 02:20:36 +04:00
}
2006-01-16 18:29:08 +03:00
mutex_unlock ( & list_mutex ) ;
2007-09-17 23:55:10 +04:00
return 0 ;
}
/* open callback: hook snd_mem_proc_read() up as a single-shot seq_file */
static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, snd_mem_proc_read, NULL);
}
2005-05-30 20:27:03 +04:00
/* FIXME: for pci only - other bus? */
# ifdef CONFIG_PCI
# define gettoken(bufp) strsep(bufp, " \t\n")
2007-09-17 23:55:10 +04:00
/*
 * proc write handler: accepts the commands
 *   "add <vendor> <device> <dma-mask> <size> <buffers>" - pre-allocate
 *       reserved DMA buffers for the matching PCI device(s)
 *   "erase" - purge all reserved buffers
 * Lines starting with '#' are ignored.  Note that parse/alloc failures
 * are reported via printk but still return @count, so the write itself
 * "succeeds" from userspace's point of view.
 */
static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
				  size_t count, loff_t * ppos)
{
	char buf[128];
	char *token, *p;

	/* reject overlong input; one byte is reserved for the terminator */
	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';

	p = buf;
	token = gettoken(&p);
	if (!token || *token == '#')
		return count;	/* empty or comment line: silently accepted */
	if (strcmp(token, "add") == 0) {
		char *endp;
		int vendor, device, size, buffers;
		long mask;
		int i, alloced;
		struct pci_dev *pci;

		/* parse: vendor-id device-id dma-mask size buffer-count;
		 * size is 64kB..16MB, at most 4 buffers per device */
		if ((token = gettoken(&p)) == NULL ||
		    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (device = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (mask = simple_strtol(token, NULL, 0)) < 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (size = memparse(token, &endp)) < 64*1024 ||
		    size > 16*1024*1024 /* too big */ ||
		    (token = gettoken(&p)) == NULL ||
		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
		    buffers > 4) {
			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
			return count;
		}
		vendor &= 0xffff;
		device &= 0xffff;

		alloced = 0;
		pci = NULL;
		/* walk every matching PCI device; pci_get_device() keeps a
		 * reference on the returned device, so drop it with
		 * pci_dev_put() on every early exit */
		while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
			if (mask > 0 && mask < 0xffffffff) {
				if (pci_set_dma_mask(pci, mask) < 0 ||
				    pci_set_consistent_dma_mask(pci, mask) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
					pci_dev_put(pci);
					return count;
				}
			}
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					pci_dev_put(pci);
					return count;
				}
				snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
			}
			alloced++;
		}
		if (!alloced) {
			/* no matching PCI device present: reserve anonymous
			 * buffers keyed by (vendor << 16) | device instead */
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				/* FIXME: We can allocate only in ZONE_DMA
				 * without a device pointer!
				 */
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					break;
				}
				snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
			}
		}
	} else if (strcmp(token, "erase") == 0)
		/* FIXME: need for releasing each buffer chunk? */
		free_all_reserved_pages();
	else
		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
	return count;
}
# endif /* CONFIG_PCI */
2007-09-17 23:55:10 +04:00
/* file operations for the proc entry; writing is supported only with PCI */
static const struct file_operations snd_mem_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= snd_mem_proc_open,
	.read		= seq_read,
#ifdef CONFIG_PCI
	.write		= snd_mem_proc_write,
#endif
	.llseek		= seq_lseek,
	.release	= single_release,
};
2005-04-17 02:20:36 +04:00
# endif /* CONFIG_PROC_FS */
/*
 * module entry
 */
static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
	/* failure to create the proc file is not fatal; the allocator
	 * works without it and snd_mem_proc simply stays NULL */
	snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
				   &snd_mem_proc_fops);
#endif
	return 0;
}
/*
 * module exit: tear down the proc file, release all reserved buffers,
 * and warn if the global page accounting shows a leak.
 */
static void __exit snd_mem_exit(void)
{
#ifdef CONFIG_PROC_FS
	/* SND_MEM_PROC_FILE is only defined under CONFIG_PROC_FS above;
	 * guard the removal so the file still builds with proc disabled */
	remove_proc_entry(SND_MEM_PROC_FILE, NULL);
#endif
	free_all_reserved_pages();
	if (snd_allocated_pages > 0)
		printk(KERN_ERR "snd-malloc: Memory leak?  pages not freed = %li\n", snd_allocated_pages);
}
module_init ( snd_mem_init )
module_exit ( snd_mem_exit )
/*
* exports
*/
EXPORT_SYMBOL ( snd_dma_alloc_pages ) ;
EXPORT_SYMBOL ( snd_dma_alloc_pages_fallback ) ;
EXPORT_SYMBOL ( snd_dma_free_pages ) ;
EXPORT_SYMBOL ( snd_dma_get_reserved_buf ) ;
EXPORT_SYMBOL ( snd_dma_reserve_buf ) ;
EXPORT_SYMBOL ( snd_malloc_pages ) ;
EXPORT_SYMBOL ( snd_free_pages ) ;