/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
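
/*
 * Worked example (editor's illustration, not part of the original source):
 * for a physical address of 0x1234567000, GPTE_ENCODE() keeps bits 31:12
 * (0x34567000), folds address bits 39:32 (0x12) into PTE bits 11:4 (0x120)
 * and ORs in GPTE_VALID|GPTE_COHERENT, giving the 32-bit entry 0x34567123.
 * GPTE_DECODE(0x34567123) shifts the 0x120 field back up by 28 bits and
 * recovers 0x1234567000.
 */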

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
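
/*
 * Allocate a range of 'size' GART pages from the bitmap, searching from
 * next_bit and wrapping around once; a wrap (or an explicit iommu_fullflush)
 * marks the GART for flushing. base_index/boundary_size keep the allocation
 * from straddling the device's DMA segment boundary. Illustrative note
 * (editor's, not from the original source): for a device with the common 4GB
 * segment boundary, dma_get_seg_boundary() returns 0xffffffff and
 * boundary_size works out to 0x100000 pages.
 */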
static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long)iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */
	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
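
/*
 * Illustrative note (editor's, not from the original source): a mapping goes
 * through the GART when the bus address would not fit the device's DMA mask,
 * e.g. a buffer at physical 0x140000000 (above 4GB) mapped for a device with
 * a 32-bit mask (0xffffffff), or unconditionally when force_iommu is set.
 */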
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir)
{
	unsigned long npages = iommu_num_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, paddr, size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = gart_map_simple(dev, paddr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries continuous into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
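/*
 * Illustrative note (editor's, not from the original source): with
 * iommu_merge enabled and both entries needing the IOMMU, two 4096-byte
 * chunks where the first ends exactly on a page boundary and the second has
 * no intra-page offset are merged into one 8KB GART mapping reported in a
 * single output scatterlist entry; a chunk with a non-zero offset, or one
 * that would exceed dma_get_max_seg_size(), starts a new mapping instead.
 */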
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;
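
/*
 * Illustrative note (editor's, not from the original source): when an AGP
 * driver may also be using the aperture, only half of it is claimed for the
 * IOMMU, so a 64MB aperture leaves roughly 32MB of remapping area before the
 * round_up() adjustment below; with no_agp set the whole aperture is used.
 */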
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}
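
/*
 * Illustrative note (editor's, not from the original source): the northbridge
 * reports the aperture as an order field in AMD64_GARTAPERTURECTL (bits 3:1)
 * and a base in 32MB units in AMD64_GARTAPERTUREBASE; an order of 1 therefore
 * decodes to a 64MB aperture, and a base value of 0x40 to a physical base of
 * 0x40 << 25 = 2GB.
 */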
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,
};

static struct sys_device device_gart = {
	.id = 0,
	.cls = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);

	/* need to map that range */
	end_pfn = (aper_base >> PAGE_SHIFT) + (aper_size >> PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base >> PAGE_SHIFT);
		init_memory_mapping(start_pfn << PAGE_SHIFT,
				    end_pfn << PAGE_SHIFT);
	}
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start >> PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
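
/*
 * Usage note (editor's, assuming the usual call path through the "iommu="
 * boot-parameter handling in pci-dma.c, which hands the GART-specific tokens
 * to gart_parse_options()): booting with "iommu=fullflush,memaper=2" would
 * set iommu_fullflush, force the fallback aperture and request an aperture
 * order of 2.
 */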
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}