/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
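
/* Hypervisor calls are relatively expensive, so IOMMU mappings are not
 * programmed one page at a time.  Each CPU owns a page-sized list of up
 * to PGLIST_NENTS physical page addresses; pages are queued into that
 * list and handed to the hypervisor in pci_sun4v_iommu_map() batches.
 * The batch state is per-CPU and only manipulated with interrupts
 * disabled, which is what makes the lockless __get_cpu_var() accesses
 * below safe.
 */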
struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);

/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev		= pdev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}
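
/* The arena is a simple bitmap allocator over IOTSB entries.  A moving
 * hint remembers where the previous search ended; when a search runs
 * off the end, the region below the hint is scanned once more before
 * giving up, so the whole map is covered in at most two passes.
 */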
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
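
/* Consistent allocations grab physically contiguous pages, reserve a
 * matching run of IOTSB entries under iommu->lock, then program the
 * mappings through the per-CPU batch with interrupts disabled.  On a
 * mapping failure the arena entries are released before the pages are
 * freed, mirroring the setup order.
 */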
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
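
/* Streaming single mappings work the same way, except that only whole
 * IO pages are mapped: the sub-page offset of the original buffer is
 * OR'd back into the returned bus address, and write permission is
 * granted unless the transfer is PCI_DMA_TODEVICE.
 */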
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)
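
/* fill_sg() walks the scatterlist entries that prepare_sg() coalesced
 * into `nused' DMA segments and emits one IOMMU mapping per IO page.
 * The inner loops advance through the backing sg entries while
 * watching for physical page crossings, so contiguous sg entries that
 * share a page produce only a single mapping.
 */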
static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list.  */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary.  */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses.  */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings.  */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors.  */
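
/* Config space accesses are made via hypervisor calls, and the hash
 * table below records every device actually found in the OBP device
 * tree.  Accesses to anything not in the table are treated as out of
 * range, presumably to avoid issuing config cycles to addresses with
 * no device behind them.
 */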
struct pdev_entry {
	struct pdev_entry	*next;
	u32			devhandle;
	unsigned int		bus;
	unsigned int		device;
	unsigned int		func;
};

#define PDEV_HTAB_SIZE	16
#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)

static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];

static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	unsigned int val;

	val = (devhandle ^ (devhandle >> 4));
	val ^= bus;
	val ^= device;
	val ^= func;

	return val & PDEV_HTAB_MASK;
}

static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
	struct pdev_entry **slot;

	if (!p)
		return -ENOMEM;

	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	p->next = *slot;
	*slot = p;

	p->devhandle = devhandle;
	p->bus = bus;
	p->device = device;
	p->func = func;

	return 0;
}

/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
{
	toplevel_node = toplevel_node->child;

	while (toplevel_node != NULL) {
		struct linux_prom_pci_registers *regs;
		struct property *prop;
		int ret;

		ret = obp_find(toplevel_node, bus, devfn);
		if (ret != 0)
			return ret;

		prop = of_find_property(toplevel_node, "reg", NULL);
		if (!prop)
			goto next_sibling;

		regs = prop->value;
		if (((regs->phys_hi >> 16) & 0xff) == bus &&
		    ((regs->phys_hi >> 8) & 0xff) == devfn)
			break;

	next_sibling:
		toplevel_node = toplevel_node->sibling;
	}

	return toplevel_node != NULL;
}

static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
	u32 devhandle = pbm->devhandle;
	unsigned int bus;

	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
		unsigned int devfn;

		for (devfn = 0; devfn < 256; devfn++) {
			unsigned int device = PCI_SLOT(devfn);
			unsigned int func = PCI_FUNC(devfn);

			if (obp_find(pbm->prom_node, bus, devfn)) {
				int err = pdev_htab_add(devhandle, bus,
							device, func);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p;

	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	while (p) {
		if (p->devhandle == devhandle &&
		    p->bus == bus &&
		    p->device == device &&
		    p->func == func)
			break;

		p = p->next;
	}

	return p;
}

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;

	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};

static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	struct property *prop;
	struct device_node *dp;

	if ((dp = p->pbm_A.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_A.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_A);
	}
	if ((dp = p->pbm_B.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_B.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo << 0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL * 1024UL * 1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		}
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);

	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
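
/* The firmware may leave IOMMU mappings in place, for example for the
 * OBP console.  Every IOTSB entry is inspected: a mapping whose target
 * page is still listed as available physical memory is stale and gets
 * demapped, while the remainder are marked busy in the arena so they
 * are never handed out again.
 */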
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
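
/* The "virtual-dma" property supplies the base and size of the DVMA
 * window, and the TSB is sized to cover it: for instance, a 2GB
 * window (0x80000000) at 8KB IO pages needs 256K TSB entries, which
 * the code below expresses as tsbsize = 256 units of 8KB before
 * dividing by sizeof(iopte_t).
 */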
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int tsbsize;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 256;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	}

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	struct property *prop;
	unsigned int *busrange;

	prop = of_find_property(pbm->prom_node, "bus-range", NULL);

	busrange = prop->value;

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}

static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	struct property *prop;
	int len, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = dp;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	prop = of_find_property(dp, "ranges", &len);
	pbm->pbm_ranges = prop->value;
	pbm->num_pbm_ranges =
		(len / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	prop = of_find_property(dp, "interrupt-map", &len);
	pbm->pbm_intmap = prop->value;
	pbm->num_pbm_intmap =
		(len / sizeof(struct linux_prom_pci_intmap));

	prop = of_find_property(dp, "interrupt-map-mask", NULL);
	pbm->pbm_intmask = prop->value;

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);

	pdev_htab_populate(pbm);
}
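
/* Each PCI controller provides two bus modules; bit 0x40 of the
 * hypervisor devhandle distinguishes PBM A from PBM B.  sun4v_pci_init()
 * is invoked once per PBM, so it first checks whether the sibling
 * handle (devhandle ^ 0x40) has already registered a half-initialized
 * controller it can attach to.
 */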
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}