2008-04-08 20:20:43 +04:00
# include <linux/dma-mapping.h>
2008-04-08 20:20:51 +04:00
# include <linux/dmar.h>
2008-04-08 20:20:54 +04:00
# include <linux/bootmem.h>
2008-04-08 20:20:53 +04:00
# include <linux/pci.h>
2008-04-08 20:20:51 +04:00
2008-04-08 20:20:54 +04:00
# include <asm/proto.h>
# include <asm/dma.h>
2008-07-11 05:23:42 +04:00
# include <asm/iommu.h>
2008-04-08 20:20:51 +04:00
# include <asm/calgary.h>
2008-06-26 23:28:08 +04:00
# include <asm/amd_iommu.h>
2008-04-08 20:20:43 +04:00
2008-10-24 03:51:00 +04:00
static int forbid_dac __read_mostly ;
2008-07-26 06:44:49 +04:00
struct dma_mapping_ops * dma_ops ;
2008-04-08 20:20:50 +04:00
EXPORT_SYMBOL ( dma_ops ) ;
2008-04-28 03:15:58 +04:00
static int iommu_sac_force __read_mostly ;
2008-04-08 20:20:55 +04:00
2008-04-08 20:20:52 +04:00
# ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1 ;
int force_iommu __read_mostly = 1 ;
# else
int panic_on_overflow __read_mostly = 0 ;
int force_iommu __read_mostly = 0 ;
# endif
2008-04-08 20:20:56 +04:00
int iommu_merge __read_mostly = 0 ;
int no_iommu __read_mostly ;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0 ;
/* This tells the BIO block layer to assume merging. Default to off
because we cannot guarantee merging later . */
int iommu_bio_merge __read_mostly = 0 ;
EXPORT_SYMBOL ( iommu_bio_merge ) ;
2008-04-08 20:21:00 +04:00
dma_addr_t bad_dma_address __read_mostly = 0 ;
EXPORT_SYMBOL ( bad_dma_address ) ;
2008-04-08 20:20:56 +04:00
2008-04-09 20:18:10 +04:00
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask , but this is bug - to - bug compatible
to older i386 . */
2008-08-19 18:32:45 +04:00
struct device x86_dma_fallback_dev = {
2008-04-09 20:18:10 +04:00
. bus_id = " fallback device " ,
. coherent_dma_mask = DMA_32BIT_MASK ,
2008-08-19 18:32:45 +04:00
. dma_mask = & x86_dma_fallback_dev . coherent_dma_mask ,
2008-04-09 20:18:10 +04:00
} ;
2008-08-19 18:32:45 +04:00
EXPORT_SYMBOL ( x86_dma_fallback_dev ) ;
2008-04-09 20:18:10 +04:00
2008-04-08 20:20:43 +04:00
/*
 * Set the device's DMA addressing mask.  Fails with -EIO when the
 * device has no mask at all or cannot support the requested one.
 */
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask)
		return -EIO;
	if (!dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
2008-04-08 20:20:54 +04:00
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
/* Size of the early <4G reservation; default 128M, "dma32_size=" overrides */
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

/* Parse the "dma32_size=" early boot parameter. */
static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;

	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
/*
 * Reserve a chunk of bootmem below 4G early, so an IOMMU can later be
 * given addressable memory even after bootmem has handed pages out.
 */
void __init dma32_reserve_bootmem(void)
{
	unsigned long bytes, alignment;

	/* Nothing to reserve when all memory is below 4G anyway. */
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * check aperture_64.c allocate_aperture() for reason about
	 * using 512M as goal
	 */
	alignment = 64ULL<<20;
	bytes = roundup(dma32_bootmem_size, alignment);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(bytes, alignment,
				 512ULL<<20);
	/* Record what we actually got; 0 when the allocation failed. */
	dma32_bootmem_size = dma32_bootmem_ptr ? bytes : 0;
}
/* Release the early <4G reservation back to bootmem, if one was made. */
static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;
	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	/* Reset state so a stale pointer can never be freed twice. */
	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
/* Probe for hardware IOMMUs and set up whichever one is present. */
void __init pci_iommu_alloc(void)
{
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}
2008-07-29 08:38:53 +04:00
2008-10-16 09:02:07 +04:00
unsigned long iommu_nr_pages ( unsigned long addr , unsigned long len )
2008-07-29 08:38:53 +04:00
{
unsigned long size = roundup ( ( addr & ~ PAGE_MASK ) + len , PAGE_SIZE ) ;
return size > > PAGE_SHIFT ;
}
2008-10-16 09:02:07 +04:00
EXPORT_SYMBOL ( iommu_nr_pages ) ;
2008-04-08 20:20:54 +04:00
# endif
2008-09-24 15:48:35 +04:00
/*
 * Generic coherent allocation: grab zeroed pages on the device's node
 * and check they fall inside the device's DMA mask.  When the first
 * attempt lands too high and the mask is below 4G, retry once from
 * the ZONE_DMA pool.
 */
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long mask;
	struct page *pg;
	dma_addr_t phys;

	mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	pg = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!pg)
		return NULL;

	phys = page_to_phys(pg);
	if (!is_buffer_dma_capable(mask, phys, size)) {
		__free_pages(pg, get_order(size));

		/* Retry from ZONE_DMA unless we already did. */
		if (mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = phys;
	return page_address(pg);
}
2008-04-08 20:20:56 +04:00
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 *
 * Parses the comma-separated "iommu=" option list; unrecognized words
 * are still passed on to gart_parse_options().
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		/*
		 * "nodac" must set forbid_dac positive: dma_supported()
		 * disallows DAC only for forbid_dac > 0.  The previous
		 * -1 here meant "never forbid DAC" - i.e. the exact
		 * opposite, which is what "usedac" below is for.
		 */
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* Advance to the next comma-separated word. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
2008-04-08 20:20:55 +04:00
/*
 * Can the device DMA with the given addressing mask?  Returns nonzero
 * when the mask is usable; delegates to the mapping ops when they
 * provide their own check.
 */
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	/* A mask above 32 bits means DAC; refuse it when DAC is forbidden. */
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
2008-04-08 20:20:51 +04:00
/*
 * Late IOMMU bring-up.  Each init is called in turn; no_iommu_init()
 * runs last.  NOTE(review): order appears to mirror the detect order
 * in pci_iommu_alloc() for fall-back reasons - keep them in sync.
 */
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}
/* Quiesce the IOMMU on shutdown; only the GART needs explicit teardown. */
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
2008-10-24 03:51:00 +04:00
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	/* Only act on PCI-to-PCI bridges, and never override an
	   explicit "allowdac"/"usedac"/"nodac" choice (!= 0). */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO
			"PCI: VIA PCI bridge detected. Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif