2009-05-14 08:05:58 +02:00
/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author	Liu Chun
 * @Date	Apr 1 2008
 */
# include "ctvmem.h"
# include <linux/slab.h>
# include <linux/mm.h>
# include <linux/io.h>
2009-06-02 15:26:19 +02:00
# include <sound/pcm.h>
2009-05-14 08:05:58 +02:00
2009-06-02 15:04:29 +02:00
# define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
# define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
2009-05-14 08:05:58 +02:00
/* *
* Find or create vm block based on requested @ size .
* @ size must be page aligned .
* */
static struct ct_vm_block *
get_vm_block ( struct ct_vm * vm , unsigned int size )
{
2009-06-08 14:57:57 +02:00
struct ct_vm_block * block = NULL , * entry ;
struct list_head * pos ;
2009-05-14 08:05:58 +02:00
2009-06-02 15:26:19 +02:00
size = CT_PAGE_ALIGN ( size ) ;
if ( size > vm - > size ) {
printk ( KERN_ERR " ctxfi: Fail! No sufficient device virtural "
" memory space available! \n " ) ;
return NULL ;
}
2009-06-02 08:40:51 +02:00
mutex_lock ( & vm - > lock ) ;
2009-05-14 08:05:58 +02:00
list_for_each ( pos , & vm - > unused ) {
entry = list_entry ( pos , struct ct_vm_block , list ) ;
if ( entry - > size > = size )
break ; /* found a block that is big enough */
}
if ( pos = = & vm - > unused )
2009-06-02 08:40:51 +02:00
goto out ;
2009-05-14 08:05:58 +02:00
if ( entry - > size = = size ) {
/* Move the vm node from unused list to used list directly */
list_del ( & entry - > list ) ;
list_add ( & entry - > list , & vm - > used ) ;
vm - > size - = size ;
2009-06-02 08:40:51 +02:00
block = entry ;
goto out ;
2009-05-14 08:05:58 +02:00
}
block = kzalloc ( sizeof ( * block ) , GFP_KERNEL ) ;
2009-07-22 17:12:34 +02:00
if ( ! block )
2009-06-02 08:40:51 +02:00
goto out ;
2009-05-14 08:05:58 +02:00
block - > addr = entry - > addr ;
block - > size = size ;
list_add ( & block - > list , & vm - > used ) ;
entry - > addr + = size ;
entry - > size - = size ;
vm - > size - = size ;
2009-06-02 08:40:51 +02:00
out :
mutex_unlock ( & vm - > lock ) ;
2009-05-14 08:05:58 +02:00
return block ;
}
static void put_vm_block ( struct ct_vm * vm , struct ct_vm_block * block )
{
2009-06-08 14:57:57 +02:00
struct ct_vm_block * entry , * pre_ent ;
struct list_head * pos , * pre ;
2009-05-14 08:05:58 +02:00
2009-06-02 15:26:19 +02:00
block - > size = CT_PAGE_ALIGN ( block - > size ) ;
2009-06-02 08:40:51 +02:00
mutex_lock ( & vm - > lock ) ;
2009-05-14 08:05:58 +02:00
list_del ( & block - > list ) ;
vm - > size + = block - > size ;
list_for_each ( pos , & vm - > unused ) {
entry = list_entry ( pos , struct ct_vm_block , list ) ;
if ( entry - > addr > = ( block - > addr + block - > size ) )
break ; /* found a position */
}
if ( pos = = & vm - > unused ) {
list_add_tail ( & block - > list , & vm - > unused ) ;
entry = block ;
} else {
if ( ( block - > addr + block - > size ) = = entry - > addr ) {
entry - > addr = block - > addr ;
entry - > size + = block - > size ;
kfree ( block ) ;
} else {
__list_add ( & block - > list , pos - > prev , pos ) ;
entry = block ;
}
}
pos = & entry - > list ;
pre = pos - > prev ;
while ( pre ! = & vm - > unused ) {
entry = list_entry ( pos , struct ct_vm_block , list ) ;
pre_ent = list_entry ( pre , struct ct_vm_block , list ) ;
if ( ( pre_ent - > addr + pre_ent - > size ) > entry - > addr )
break ;
pre_ent - > size + = entry - > size ;
list_del ( pos ) ;
kfree ( entry ) ;
pos = pre ;
pre = pos - > prev ;
}
2009-06-02 08:40:51 +02:00
mutex_unlock ( & vm - > lock ) ;
2009-05-14 08:05:58 +02:00
}
/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
2009-06-02 15:26:19 +02:00
ct_vm_map ( struct ct_vm * vm , struct snd_pcm_substream * substream , int size )
2009-05-14 08:05:58 +02:00
{
2009-06-02 15:26:19 +02:00
struct ct_vm_block * block ;
unsigned int pte_start ;
unsigned i , pages ;
2009-05-14 08:05:58 +02:00
unsigned long * ptp ;
2009-06-02 15:26:19 +02:00
block = get_vm_block ( vm , size ) ;
2009-05-14 08:05:58 +02:00
if ( block = = NULL ) {
2009-05-14 15:19:30 +02:00
printk ( KERN_ERR " ctxfi: No virtual memory block that is big "
2009-05-14 08:05:58 +02:00
" enough to allocate! \n " ) ;
return NULL ;
}
2009-06-02 15:26:19 +02:00
ptp = vm - > ptp [ 0 ] ;
2009-06-02 15:04:29 +02:00
pte_start = ( block - > addr > > CT_PAGE_SHIFT ) ;
2009-06-02 15:26:19 +02:00
pages = block - > size > > CT_PAGE_SHIFT ;
for ( i = 0 ; i < pages ; i + + ) {
unsigned long addr ;
addr = snd_pcm_sgbuf_get_addr ( substream , i < < CT_PAGE_SHIFT ) ;
ptp [ pte_start + i ] = addr ;
}
2009-05-14 08:05:58 +02:00
block - > size = size ;
return block ;
}
static void ct_vm_unmap ( struct ct_vm * vm , struct ct_vm_block * block )
{
/* do unmapping */
put_vm_block ( vm , block ) ;
}
/* *
* return the host ( kmalloced ) addr of the @ index - th device
* page talbe page on success , or NULL on failure .
* The first returned NULL indicates the termination .
* */
static void *
ct_get_ptp_virt ( struct ct_vm * vm , int index )
{
void * addr ;
addr = ( index > = CT_PTP_NUM ) ? NULL : vm - > ptp [ index ] ;
return addr ;
}
int ct_vm_create ( struct ct_vm * * rvm )
{
struct ct_vm * vm ;
struct ct_vm_block * block ;
int i ;
* rvm = NULL ;
vm = kzalloc ( sizeof ( * vm ) , GFP_KERNEL ) ;
2009-07-22 17:12:34 +02:00
if ( ! vm )
2009-05-14 08:05:58 +02:00
return - ENOMEM ;
2009-06-02 08:40:51 +02:00
mutex_init ( & vm - > lock ) ;
2009-05-14 08:05:58 +02:00
/* Allocate page table pages */
for ( i = 0 ; i < CT_PTP_NUM ; i + + ) {
vm - > ptp [ i ] = kmalloc ( PAGE_SIZE , GFP_KERNEL ) ;
2009-07-22 17:12:34 +02:00
if ( ! vm - > ptp [ i ] )
2009-05-14 08:05:58 +02:00
break ;
}
if ( ! i ) {
/* no page table pages are allocated */
kfree ( vm ) ;
return - ENOMEM ;
}
vm - > size = CT_ADDRS_PER_PAGE * i ;
/* Initialise remaining ptps */
for ( ; i < CT_PTP_NUM ; i + + )
vm - > ptp [ i ] = NULL ;
vm - > map = ct_vm_map ;
vm - > unmap = ct_vm_unmap ;
vm - > get_ptp_virt = ct_get_ptp_virt ;
INIT_LIST_HEAD ( & vm - > unused ) ;
INIT_LIST_HEAD ( & vm - > used ) ;
block = kzalloc ( sizeof ( * block ) , GFP_KERNEL ) ;
if ( NULL ! = block ) {
block - > addr = 0 ;
block - > size = vm - > size ;
list_add ( & block - > list , & vm - > unused ) ;
}
* rvm = vm ;
return 0 ;
}
/* The caller must ensure no mapping pages are being used
* by hardware before calling this function */
void ct_vm_destroy ( struct ct_vm * vm )
{
int i ;
2009-06-08 14:57:57 +02:00
struct list_head * pos ;
struct ct_vm_block * entry ;
2009-05-14 08:05:58 +02:00
/* free used and unused list nodes */
while ( ! list_empty ( & vm - > used ) ) {
pos = vm - > used . next ;
list_del ( pos ) ;
entry = list_entry ( pos , struct ct_vm_block , list ) ;
kfree ( entry ) ;
}
while ( ! list_empty ( & vm - > unused ) ) {
pos = vm - > unused . next ;
list_del ( pos ) ;
entry = list_entry ( pos , struct ct_vm_block , list ) ;
kfree ( entry ) ;
}
/* free allocated page table pages */
for ( i = 0 ; i < CT_PTP_NUM ; i + + )
kfree ( vm - > ptp [ i ] ) ;
vm - > size = 0 ;
kfree ( vm ) ;
}