/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

/* Set to 1 when the software bounce buffer (swiotlb) should be used. */
int swiotlb __read_mostly;

/* Boot-time allocation of the bounce-buffer pool, from low bootmem. */
void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

/* Late allocation of bounce-buffer pages, once bootmem is gone. */
void *swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

/* Without an IOMMU, x86 bus addresses and CPU physical addresses coincide. */
dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr;
}

/* Weak default: no physical range unconditionally requires bouncing. */
int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return 0;
}

/*
 * Try the generic coherent allocator first; fall back to a swiotlb
 * bounce allocation if it cannot satisfy the device's DMA mask.
 */
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
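
/*
 * Illustrative sketch, not part of this file: a driver-side coherent
 * allocation reaches the fallback above through the generic DMA API.
 * The pci_dev below is hypothetical:
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle,
 *				       GFP_KERNEL);
 *
 * dma_alloc_coherent() dispatches to dma_ops->alloc_coherent, i.e.
 * x86_swiotlb_alloc_coherent() once the ops below are installed.
 */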

static struct dma_map_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = x86_swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.dma_supported = NULL,
};
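
/*
 * Illustrative sketch, not part of this file: streaming mappings made
 * through the generic DMA API dispatch into swiotlb once these ops are
 * installed. The device, page and length below are hypothetical:
 *
 *	dma_addr_t bus = dma_map_page(&pdev->dev, page, 0, len,
 *				      DMA_TO_DEVICE);
 *
 * dma_map_page() resolves to dma_ops->map_page, i.e. swiotlb_map_page(),
 * which copies through a bounce buffer when the page is not addressable
 * within the device's DMA mask.
 */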

void __init pci_swiotlb_init(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
	    iommu_pass_through)
		swiotlb = 1;
#endif
	if (swiotlb_force)
		swiotlb = 1;

	if (swiotlb) {
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_init();
		dma_ops = &swiotlb_dma_ops;
	}
}
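
/*
 * Illustrative note, assuming the lib/swiotlb.c behaviour of this era:
 * swiotlb_force is set by the "swiotlb=force" kernel command-line
 * option, which selects bounce buffering even on machines where the
 * checks above would not, e.g. to exercise the bounce path for
 * debugging.
 */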