// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
# include "memalloc_local.h"
struct snd_sg_page {
	void *buf;
	dma_addr_t addr;
};

struct snd_sg_buf {
	int size;	/* allocated byte size */
	int pages;	/* allocated pages */
	int tblsize;	/* allocated table size */
	struct snd_sg_page *table;	/* address table */
	struct page **page_table;	/* page table (for vmap/vunmap) */
	struct device *dev;
};
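
/*
 * Each table entry covers one page.  The low bits of ->addr (below
 * PAGE_MASK) are free because page addresses are page-aligned; the
 * allocator stores the chunk length in pages there to mark the head
 * page of each physically continuous chunk (see snd_dma_sg_alloc()
 * and snd_dma_sg_free()).
 */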

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
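
/*
 * Free an SG buffer: unmap the vmap'ed kernel area, release each
 * physically continuous chunk through the marker in its head page,
 * then free the bookkeeping tables.
 */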
static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuous pages */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;
}

#define MAX_ALLOC_PAGES		32
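
/*
 * Allocate an SG buffer: grab physically continuous chunks of at most
 * MAX_ALLOC_PAGES pages (shrinking the chunk size whenever a smaller
 * chunk is returned), record every page in the tables, and finally
 * vmap all pages into one virtually contiguous area.  If an allocation
 * fails after some progress, the buffer is truncated to the pages
 * obtained so far.
 */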
static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;
	void *area;

	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
		type = SNDRV_DMA_TYPE_DEV_WC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = dmab->dev.dev;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!area)
		goto _failed;
	return area;

 _failed:
	snd_dma_sg_free(dmab); /* free the table */
	return NULL;
}
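
/*
 * Return the DMA address at the given byte offset, masking out the
 * chunk-head marker stored in the low bits of the table entry.
 */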
static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
				      size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	dma_addr_t addr;

	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
	return addr + offset % PAGE_SIZE;
}
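
/*
 * Return the struct page backing the given byte offset, or NULL if the
 * offset lies beyond the allocated pages.
 */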
static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
					size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	unsigned int idx = offset >> PAGE_SHIFT;

	if (idx >= (unsigned int)sgbuf->pages)
		return NULL;
	return sgbuf->page_table[idx];
}
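
/*
 * Return how many bytes, starting at ofs and capped at size, lie on
 * physically continuous pages, so that callers can program DMA in
 * contiguous runs.
 */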
static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
					      unsigned int ofs,
					      unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
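
/*
 * mmap hook: adjust the page protection for write-combined buffers;
 * returning -ENOENT tells the caller to fall back to the default mmap
 * handler using the adjusted protection.
 */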
static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return -ENOENT; /* continue with the default mmap handler */
}
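
/*
 * Ops table picked up by the memalloc core for the SG buffer types
 * (SNDRV_DMA_TYPE_DEV_SG and SNDRV_DMA_TYPE_DEV_WC_SG).
 */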
const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_free,
	.get_addr = snd_dma_sg_get_addr,
	.get_page = snd_dma_sg_get_page,
	.get_chunk_size = snd_dma_sg_get_chunk_size,
	.mmap = snd_dma_sg_mmap,
};
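
/*
 * Usage sketch (illustrative only): drivers do not call these ops
 * directly but go through the generic allocator in memalloc.c.  A
 * minimal example, assuming a driver with a valid "card->dev" and an
 * arbitrarily chosen 64kB buffer:
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card->dev,
 *				  64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	(use dmab.area as the vmap'ed CPU address, and
 *	 snd_sgbuf_get_addr() / snd_sgbuf_get_chunk_size() for DMA setup)
 *	snd_dma_free_pages(&dmab);
 */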