/*
 * Scatter-Gather buffer
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

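/*
 * snd_free_sgbuf_pages - release a scatter-gather buffer
 * @dmab: the buffer record holding the SG table in its private_data
 *
 * Unmaps the vmap()ed kernel area, frees each allocated page chunk via
 * snd_dma_free_pages(), and releases the SG bookkeeping tables.
 * Returns 0 on success, or -EINVAL if no SG data is attached.
 */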
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuous pages */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

#define MAX_ALLOC_PAGES		32
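
/*
 * snd_malloc_sgbuf_pages - allocate a scatter-gather buffer
 * @device: the DMA device
 * @size: number of bytes to allocate
 * @dmab: the buffer record to fill; the SG table goes to its private_data
 * @res_size: if non-NULL, a partial allocation is accepted and the actually
 *	allocated size is stored here
 *
 * Allocates the pages in chunks, records them in an SG page table, and maps
 * them into one virtually contiguous area via vmap().
 * Returns the mapped area, or NULL on error.
 */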
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			/* partial allocation: keep what has been allocated */
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				/* mark head: low bits hold the chunk page count */
				table->addr |= chunk;
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
/*
 * compute the max chunk size with continuous pages on sg-buffer
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
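
/*
 * Illustrative sketch: a typical consumer of snd_sgbuf_get_chunk_size()
 * walks an SG-allocated buffer in physically continuous runs, e.g. when
 * programming per-descriptor DMA hardware.  The helper below is hypothetical
 * and only shows the call pattern; the descriptor-programming step is a
 * placeholder, and the code is kept out of the build.
 */
#if 0
static void example_walk_sg_chunks(struct snd_dma_buffer *dmab,
				   unsigned int bytes)
{
	unsigned int ofs = 0;

	while (ofs < bytes) {
		/* largest physically continuous run starting at ofs */
		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
							      bytes - ofs);

		/* ... program one hardware descriptor for [ofs, ofs + chunk) ... */
		ofs += chunk;
	}
}
#endif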