/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>

#include <asm/io.h>

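/*
 * Bookkeeping for a device-private coherent region declared with
 * dma_declare_coherent_memory() below: virt_base is the kernel mapping
 * of the region, device_base the base address as seen by the device,
 * size the region length in pages, and bitmap tracks which of those
 * pages are currently handed out.
 */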
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, order);

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

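/*
 * Typical use, paired with dma_free_coherent() below (a minimal sketch;
 * "pdev" is a hypothetical pci_dev the driver has already enabled):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle,
 *				       GFP_KERNEL);
 *	if (buf) {
 *		... hand "handle" to the device, touch "buf" from the CPU ...
 *		dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 *	}
 */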
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	WARN_ON(irqs_disabled());	/* for portability */
	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else
		free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;	/* don't leave a dangling pointer behind */
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

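/*
 * A minimal sketch of declaring device-local memory (the addresses,
 * size, and "pdev" below are hypothetical); on success, subsequent
 * dma_alloc_coherent() calls on this device are satisfied from the
 * declared region:
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, 0xfc000000, 0xfc000000,
 *					0x100000, DMA_MEMORY_MAP)
 *			!= DMA_MEMORY_MAP)
 *		goto err;
 */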
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

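/*
 * Occupy a specific sub-range of a declared region so that
 * dma_alloc_coherent() will not hand it out; returns the kernel
 * virtual address of that range, or an ERR_PTR() value on failure.
 */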
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	/* get_order() takes a size in bytes, so convert the page count back */
	err = bitmap_allocate_region(mem->bitmap, pos,
				     get_order(pages << PAGE_SHIFT));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

int forbid_dac;
EXPORT_SYMBOL(forbid_dac);

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);

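/*
 * Booting with "iommu=usedac" sets forbid_dac to -1, which both allows
 * DAC and prevents the quirk above from ever setting it back to 1.
 */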
static int check_iommu(char *s)
{
	if (!strcmp(s, "usedac")) {
		forbid_dac = -1;
		return 1;
	}
	return 0;
}
__setup("iommu=", check_iommu);
#endif