/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/sched.h>	/* for show_stack */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>

#include "plpar_wrappers.h"

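/*
 * Tell the hardware to invalidate a range of TCE entries by writing their
 * (possibly shifted and tagged) physical addresses to the MMIO invalidate
 * register stashed in tbl->it_index, one cacheline of TCEs at a time.
 */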
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);
	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format.  We shift the
	 * address and or in the magic from the device tree. */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc <<= 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	}

	end |= inc - 1; /* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
		start += inc;
	}
}

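/*
 * Build TCEs by storing directly into the TCE table mapped at tbl->it_base
 * (non-LPAR case), optionally followed by a software invalidate.
 */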
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	u64 proto_tce;
	u64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((u64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}

	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
	return 0;
}

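/* Clear TCEs in the shared table, then invalidate them if required. */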
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	u64 *tcep, *tces;

	tces = tcep = ((u64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	u64 *tcep;

	tcep = ((u64 *)tbl->it_base) + index;

	return *tcep;
}

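/*
 * LPAR variant: each TCE is installed with a separate H_PUT_TCE hypercall.
 * On H_NOT_ENOUGH_RESOURCES the entries built so far are torn down again.
 */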
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
			                   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

static DEFINE_PER_CPU(u64 *, tce_page);

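/*
 * LPAR variant that batches TCEs: the entries are staged in the per-cpu
 * tce_page and handed to firmware with H_PUT_TCE_INDIRECT, falling back
 * to the one-at-a-time path if the staging page cannot be allocated.
 */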
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
		                           direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__get_cpu_var(tce_page) = tcep;
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
		                        (npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
	return ret;
}

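/* Free (clear) TCEs one at a time with H_PUT_TCE, or a whole range at once
 * with H_STUFF_TCE in the multi variant below. */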
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		show_stack(current, (unsigned long *)__get_SP());
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());
	}

	return tce_ret;
}

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"

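/*
 * Clear the TCEs backing a pfn range of a dynamic DMA window, using
 * H_STUFF_TCE on up to 512 entries per call.
 */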
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
					     dma_offset,
					     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

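/*
 * Map a pfn range into a dynamic DMA window: TCEs are staged in the per-cpu
 * tce_page and installed in batches with H_PUT_TCE_INDIRECT.
 */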
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __get_cpu_var(tce_page);

	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__get_cpu_var(tce_page) = tcep;
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | next;
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

#ifdef CONFIG_PCI

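/*
 * Fill in an iommu_table for a non-LPAR PHB from the linux,tce-base and
 * linux,tce-size properties, carving a slice out of the PHB's DMA window.
 */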
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep, *sw_inval;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
				"missing tce entries !\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	if (sw_inval) {
		/*
		 * This property contains information on how to
		 * invalidate the TCE entry.  The first property is
		 * the base MMIO address used to invalidate entries.
		 * The second property tells us the format of the TCE
		 * invalidate (whether it needs to be shifted) and
		 * some magic routing info to add to our invalidate
		 * command.
		 */
		tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
		tbl->it_busno = sw_inval[1]; /* overload this with magic */
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const void *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
}

static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	if (isa_dn_orig)
		of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			   pci->phb->node);

	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const void *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
		 dn->full_name);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug("  no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   ppci->phb->node);
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		pr_debug("  created table: %p\n", ppci->iommu_table);
	}
}

static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   phb->node);
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */

	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);

static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
{
	int ret;

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%s: failed to remove DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
	else
		pr_debug("%s: successfully removed DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
}

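/*
 * Tear down a dynamic DMA window: clear all of its TCEs, ask RTAS to remove
 * the window, then delete the DIRECT64_PROPNAME property from the node.
 */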
static void remove_ddw(struct device_node *np)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	const u32 *ddw_avail;
	u64 liobn;
	int len, ret;

	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%s failed to clear tces in window.\n",
			 np->full_name);
	else
		pr_debug("%s successfully cleared tces in window.\n",
			 np->full_name);

	__remove_ddw(np, ddw_avail, liobn);

delprop:
	ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%s: failed to remove direct window property: %d\n",
			np->full_name, ret);
}

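/*
 * Return the DMA base of a direct window previously created for this node,
 * or 0 if none has been recorded in direct_window_list.
 */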
static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = direct64->dma_base;
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}

static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

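/*
 * Ask firmware (via the ibm,query-pe-dma-window RTAS call) how many dynamic
 * windows the PE can still create and which page sizes it supports.
 */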
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_query_response *query)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
		  cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);
	return ret;
}

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_create_response *create, int page_shift,
			int window_shift)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
				BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

static void restore_default_window(struct pci_dev *dev,
			u32 ddw_restore_token, unsigned long liobn)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
					BUID_HI(buid), BUID_LO(buid));
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
		 ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
}

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma-window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	const u32 *uninitialized_var(ddw_avail);
	const u32 *uninitialized_var(ddw_extensions);
	u32 ddw_restore_token = 0;
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	const void *dma_window = NULL;
	unsigned long liobn, offset, size;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
	if (!ddw_avail || len < 3 * sizeof(u32))
		goto out_unlock;

	/*
	 * the extensions property is only required to exist in certain
	 * levels of firmware and later
	 * the ibm,ddw-extensions property is a list with the first
	 * element containing the number of extensions and each
	 * subsequent entry is a value corresponding to that extension
	 */
	ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
	if (ddw_extensions) {
		/*
		 * each new defined extension length should be added to
		 * the top of the switch so the "earlier" entries also
		 * get picked up
		 */
		switch (ddw_extensions[0]) {
			/* ibm,reset-pe-dma-windows */
			case 1:
				ddw_restore_token = ddw_extensions[1];
				break;
		}
	}

	/*
	 * Only remove the existing DMA window if we can restore back to
	 * the default state. Removing the existing window maximizes the
	 * resources available to firmware for dynamic window creation.
	 */
	if (ddw_restore_token) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
		__remove_ddw(pdn, ddw_avail, liobn);
	}

	/*
	 * Query if there is a second window of size to map the
	 * whole partition.  Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_restore_window;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_restore_window;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			  query.page_size);
		goto out_restore_window;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			  "%llu-sized pages\n", max_addr, query.largest_available_block,
			  1ULL << page_shift);
		goto out_restore_window;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			"couldn't allocate property for 64bit dma window\n");
		goto out_restore_window;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			"couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		  create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			 dn->full_name, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			 pdn->full_name, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = of_read_number(&create.addr_hi, 2);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_restore_window:
	if (ddw_restore_token)
		restore_default_window(dev, ddw_restore_token, liobn);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const void *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
				 pci_name(dev), of_node_full_name(dn));
		return;
	}
	pr_debug("  parent is %s\n", pdn->full_name);

	pci = PCI_DN(pdn);
	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   pci->phb->node);
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		pr_debug("  created table: %p\n", pci->iommu_table);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
	}

	set_iommu_table_base(&dev->dev, pci->iommu_table);
}

static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const void *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}

static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!dev->dma_mask)
		return 0;

	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
				dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is a ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}

#else  /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#define dma_get_required_mask_pSeriesLP	NULL
#endif /* !CONFIG_PCI */

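/*
 * Memory hotplug notifier: keep every registered direct window in sync by
 * mapping newly onlined ranges and clearing ranges that go offline.
 */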
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};

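/*
 * Device tree reconfiguration notifier: when a node is detached, remove its
 * dynamic DMA window, free its iommu table and drop it from the window list.
 */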
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
	int err = NOTIFY_OK;
	struct device_node *np = node;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		remove_ddw(np);
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};

/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free = tce_freemulti_pSeriesLP;
		} else {
			ppc_md.tce_build = tce_build_pSeriesLP;
			ppc_md.tce_free = tce_free_pSeriesLP;
		}
		ppc_md.tce_get = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		ppc_md.tce_build = tce_build_pSeries;
		ppc_md.tce_free = tce_free_pSeries;
		ppc_md.tce_get = tce_get_pseries;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}

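/*
 * "multitce=off" on the kernel command line disables the batched
 * H_PUT_TCE_INDIRECT/H_STUFF_TCE paths and falls back to single-TCE calls.
 */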
static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free = tce_free_pSeriesLP;
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);