#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
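
/*
 * Descriptive note (added, inferred from the uses below): forbid_dac == 1
 * forbids DAC (double-address cycle) addressing, 0 leaves it allowed, and
 * -1 (set by "iommu=usedac") pins it allowed so the VIA quirk at the end
 * of this file cannot re-disable it.
 */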
static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with older i386. */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = DMA_BIT_MASK(32),
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
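
/*
 * Illustrative driver-side use (a sketch, not part of this file): a device
 * that can only address 32 bits would typically negotiate its mask in its
 * probe routine, before any DMA mapping calls:
 *
 *      if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */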

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL << 20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
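
/*
 * Descriptive note (added): reserve a chunk of low bootmem early so that a
 * contiguous range below 4G is still available later; pci_iommu_alloc()
 * frees it again right before the IOMMU detection code needs that range.
 */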
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * check aperture_64.c allocate_aperture() for the reason why
         * 512M is used as the allocation goal
         */
        align = 64ULL << 20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                                    512ULL << 20);

        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
         */
        kmemleak_ignore(dma32_bootmem_ptr);

        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
#endif
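
/*
 * Descriptive note (added): probe the possible IOMMUs in fall-back order;
 * each detect routine may claim the machine, and pci_swiotlb_init() acts
 * as the last resort.
 */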
void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
        /* free the range so the iommu can get some range less than 4G */
        dma32_free_bootmem();
#endif

        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}
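
/*
 * Descriptive note (added): generic backend for dma_alloc_coherent() when
 * no IOMMU translation is done: allocate zeroed pages on the device's node
 * and retry from GFP_DMA if the first attempt lands above the device's
 * coherent mask.
 */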
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}
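
/*
 * Worked example of the retry above (illustrative): for a device with a
 * 24-bit coherent mask, a first allocation landing at, say, 900M fails the
 * "addr + size > dma_mask" check, so the pages are freed and the
 * allocation is redone from the GFP_DMA (ISA, below 16M) zone.
 */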

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
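
/*
 * Descriptive note (added): fallback mask check used when the active
 * dma_map_ops does not supply its own ->dma_supported(): reject DAC
 * (above 32-bit) masks when forbidden, reject masks below 24 bits, and
 * steer capable devices to 32-bit SAC addressing when iommu_sac_force
 * is set.
 */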
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39-bit address
           mode that is as efficient as 32-bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
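
/*
 * Descriptive note (added): late setup run at rootfs_initcall time. It
 * enables DMA-API debugging and hands off to whichever IOMMU
 * implementation the detection code selected via x86_init.iommu.iommu_init,
 * with no_iommu_init() as the fallback when nothing else installed dma_ops.
 */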
static int __init pci_iommu_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        no_iommu_init();
        return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
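
/*
 * Descriptive note (added): DAC below is PCI's double-address cycle, i.e.
 * 64-bit addressing on a 32-bit bus. DECLARE_PCI_FIXUP_FINAL registers
 * via_no_dac() to run for every VIA PCI device during the final fixup
 * stage.
 */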
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif