/*
 * Dynamic DMA mapping support.
 */
# include <linux/types.h>
# include <linux/mm.h>
# include <linux/string.h>
# include <linux/pci.h>
# include <linux/module.h>
# include <linux/dmar.h>
# include <asm/iommu.h>
# include <asm/machvec.h>
# include <linux/dma-mapping.h>
#ifdef CONFIG_INTEL_IOMMU

#include <linux/kernel.h>
#include <asm/page.h>

/* Sentinel address handed back by the mapping layer when a DMA mapping
   attempt fails. */
dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* When non-zero, iommu_dma_supported() refuses DAC (64-bit) masks so
   drivers fall back to cheaper single-address-cycle accesses. */
static int iommu_sac_force __read_mostly;

/* Non-zero disables use of the IOMMU. */
int no_iommu __read_mostly;

/* With IOMMU debugging enabled, force all DMA through the IOMMU. */
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif

/* Identity-map (pass-through) mode flag.
   NOTE(review): presumably set from the iommu=pt boot parameter handled
   elsewhere — confirm against the parser in the IOMMU core. */
int iommu_pass_through;
2008-10-17 12:14:13 -07:00
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask , but this is bug - to - bug compatible
to i386 . */
struct device fallback_dev = {
2009-01-06 10:44:40 -08:00
. init_name = " fallback device " ,
2009-04-06 19:01:15 -07:00
. coherent_dma_mask = DMA_BIT_MASK ( 32 ) ,
2008-10-17 12:14:13 -07:00
. dma_mask = & fallback_dev . coherent_dma_mask ,
} ;
2009-01-05 23:59:02 +09:00
extern struct dma_map_ops intel_dma_ops ;
2008-10-17 12:14:13 -07:00
static int __init pci_iommu_init ( void )
{
if ( iommu_detected )
intel_iommu_init ( ) ;
return 0 ;
}
/* Must execute after PCI subsystem */
fs_initcall ( pci_iommu_init ) ;
/* No IOMMU teardown is required on this platform; intentionally a no-op. */
void pci_iommu_shutdown(void)
{
}
/* Nothing to set up here; intentionally a no-op on this platform. */
void __init
iommu_dma_init(void)
{
}
/*
 * iommu_dma_supported - check whether a device's DMA mask is usable.
 * @dev:  the device being queried
 * @mask: addressing capability (bitmask) the driver wants to use
 *
 * Returns 1 if DMA with @mask is supported, 0 otherwise.
 */
int iommu_dma_supported(struct device *dev, u64 mask)
{
	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39 bit address
	   mode that is as efficient as 32 bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %llx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(iommu_dma_supported);
2009-01-05 23:59:02 +09:00
void __init pci_iommu_alloc ( void )
{
dma_ops = & intel_dma_ops ;
dma_ops - > sync_single_for_cpu = machvec_dma_sync_single ;
dma_ops - > sync_sg_for_cpu = machvec_dma_sync_sg ;
dma_ops - > sync_single_for_device = machvec_dma_sync_single ;
dma_ops - > sync_sg_for_device = machvec_dma_sync_sg ;
dma_ops - > dma_supported = iommu_dma_supported ;
/*
* The order of these functions is important for
* fall - back / fail - over reasons
*/
detect_intel_iommu ( ) ;
# ifdef CONFIG_SWIOTLB
pci_swiotlb_init ( ) ;
# endif
}
#endif /* CONFIG_INTEL_IOMMU */