/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* The page arguments of these two macros are Emu pages (4096 bytes),
 * not the kernel-aligned pages used elsewhere.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
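/* Note on the encoding (inferred from the macro above, not documented in
 * this file): addr is EMUPAGESIZE-aligned, so shifting it left by one
 * leaves the low 13 bits free, and the entry's own page index is packed
 * into them -- presumably so the hardware can sanity-check a fetched entry.
 */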
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
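/* Worked example (an illustration, not from the original source): with
 * 16 KB kernel pages, UNIT_PAGES == 4, so one aligned page covers four
 * consecutive PTB entries and MAX_ALIGN_PAGES == MAXPAGES / 4;
 * get_aligned_page(0x5000) is then 1, and aligned_page_offset(1) maps
 * back to 0x4000.
 */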
#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment ptr */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
/*
 */

static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)
/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped
 * block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each(pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			/* exact fit: map here and keep the list order intact */
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		/* the tail region after the last mapped block is large enough */
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block in the proper position of mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as a newest block in order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);	/* round up to whole pages */
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
/*
* check if the given pointer is valid for pages
*/
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* ok, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}
EXPORT_SYMBOL(snd_emu10k1_memblk_map);
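/* Sketch of a typical call site (hypothetical, not taken from this file):
 * a caller maps the block before programming a voice, relying on the LRU
 * eviction loop above when the PTB is full:
 *
 *	err = snd_emu10k1_memblk_map(emu, blk);
 *	if (err < 0)
 *		return err;
 *	start = aligned_page_offset(blk->mapped_page);
 */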
/*
* page allocation for DMA
*/
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses but pointers are not stored so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
/*
* release DMA buffer from page table
*/
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}
/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */
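/* Sketch of the synth-side lifecycle (illustrative, not from this file;
 * the real caller would be e.g. a wavetable sample loader):
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *	if (!blk)
 *		return -ENOMEM;
 *	snd_emu10k1_synth_bzero(emu, blk, 0, sample_bytes);
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, sample_bytes))
 *		goto error;
 *	...
 *	snd_emu10k1_synth_free(emu, blk);
 */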
/*
* allocate a synth sample area
*/
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
/*
* free a synth sample area
*/
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}
EXPORT_SYMBOL(snd_emu10k1_synth_free);
/* check new allocation range; a page shared with a neighboring block at
 * either boundary is already allocated, so it is excluded from the
 * returned range
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		/* fall back if allocation failed or the page lies above the DMA mask */
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}
/*
* free pages
*/
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}
EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
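/* Both snd_emu10k1_synth_bzero() above and
 * snd_emu10k1_synth_copy_from_user() below walk the block one kernel page
 * at a time because the backing pages are not virtually contiguous; each
 * iteration handles min(bytes to the next page boundary, bytes left in
 * the region).
 */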
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}
EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);