/*
 * arch/xtensa/kernel/pci.c
 *
 * PCI bios-type initialisation for PCI machines
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2001-2005 Tensilica Inc.
 *
 * Based largely on work from Cort (ppc/kernel/pci.c)
 * IO functions copied from sparc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>

#include <asm/pci-bridge.h>
#include <asm/platform.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/* PCI Controller */

/*
 * pcibios_alloc_controller
 * pcibios_enable_device
 * pcibios_fixups
 * pcibios_align_resource
 * pcibios_fixup_bus
 * pci_bus_add_device
 * pci_mmap_page_range
 */
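
/*
 * Singly linked list of all registered PCI controllers; pci_ctrl_tail
 * points at the link to update on the next append (initially the head
 * pointer, afterwards the last entry's ->next).
 */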
struct pci_controller *pci_ctrl_head;
struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;

static int pci_bus_count;
/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
 */
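/*
 * Worked example (illustrative numbers only): an I/O window placed at
 * 0x2900 has bit 0x100 set, so the test below bumps it to
 *	(0x2900 + 0x3ff) & ~0x3ff == 0x2c00,
 * whose low ten bits land back inside the safe 0x000-0x0ff range.
 */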
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (size > 0x100) {
			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
			       " (%lu bytes)\n", pci_name(dev),
			       (int)(res - dev->resource),
			       (unsigned long)size);
		}

		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
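
/*
 * Check that the device's BARs were successfully assigned and turn on
 * I/O and/or memory decoding (plus the expansion ROM, if present) in
 * its PCI_COMMAND register.
 */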
int
pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		r = &dev->resource[idx];
		if (!r->start && r->end) {
			printk(KERN_ERR "PCI: Device %s not available because "
			       "of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
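
/*
 * Allocate and zero a pci_controller from bootmem and append it to the
 * global list of controllers scanned by pcibios_init().
 */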
struct pci_controller * __init pcibios_alloc_controller(void)
{
	struct pci_controller *pci_ctrl;

	pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
	memset(pci_ctrl, 0, sizeof(struct pci_controller));

	*pci_ctrl_tail = pci_ctrl;
	pci_ctrl_tail = &pci_ctrl->next;

	return pci_ctrl;
}
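
/*
 * Register the controller's I/O and memory apertures with the PCI core.
 * The I/O resource is offset by io_space.base, and resources that were
 * never set up fall back to full-range defaults.
 */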
static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
					    struct list_head *resources)
{
	struct resource *res;
	unsigned long io_offset;
	int i;

	io_offset = (unsigned long)pci_ctrl->io_space.base;
	res = &pci_ctrl->io_resource;
	if (!res->flags) {
		if (io_offset)
			printk(KERN_ERR "I/O resource not set for host "
			       "bridge %d\n", pci_ctrl->index);
		res->start = 0;
		res->end = IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	res->start += io_offset;
	res->end += io_offset;
	pci_add_resource_offset(resources, res, io_offset);

	for (i = 0; i < 3; i++) {
		res = &pci_ctrl->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			printk(KERN_ERR "Memory resource not set for "
			       "host bridge %d\n", pci_ctrl->index);
			res->start = 0;
			res->end = ~0U;
			res->flags = IORESOURCE_MEM;
		}
		pci_add_resource(resources, res);
	}
}
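
/*
 * Scan the root bus of every registered controller and give the platform
 * a chance to apply final fixups.
 */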
static int __init pcibios_init(void)
{
	struct pci_controller *pci_ctrl;
	struct list_head resources;
	struct pci_bus *bus;
	int next_busno = 0;

	printk("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
		pci_ctrl->last_busno = 0xff;
		INIT_LIST_HEAD(&resources);
		pci_controller_apertures(pci_ctrl, &resources);
		bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
					pci_ctrl->ops, pci_ctrl, &resources);
		pci_ctrl->bus = bus;
		pci_ctrl->last_busno = bus->busn_res.end;
		if (next_busno <= pci_ctrl->last_busno)
			next_busno = pci_ctrl->last_busno + 1;
	}
	pci_bus_count = next_busno;

	return platform_pcibios_fixup();
}

subsys_initcall(pcibios_init);
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	if (bus->parent) {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);
	}
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
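
/*
 * Called by the PCI core when a driver enables the device: verify that
 * the BARs were assigned and enable I/O and/or memory decoding.
 */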
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		r = &dev->resource[idx];
		if (!r->start && r->end) {
			printk(KERN_ERR "PCI: Device %s not available because "
			       "of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
#ifdef CONFIG_PROC_FS

/*
 * Return the index of the PCI controller for device dev.
 */

int
pci_controller_num(struct pci_dev *dev)
{
	struct pci_controller *pci_ctrl = (struct pci_controller *)dev->sysdata;
	return pci_ctrl->index;
}

#endif /* CONFIG_PROC_FS */
/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address of the device they wish
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
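/*
 * A minimal user-space sketch of that flow (illustrative only; the device
 * path and bar_base value below are hypothetical):
 *
 *	int fd = open("/proc/bus/pci/00/01.0", O_RDWR);
 *	ioctl(fd, PCIIOC_MMAP_IS_MEM);	   (select memory, not I/O, space)
 *	void *regs = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, bar_base);
 *
 * bar_base is the base address read from the device's memory BAR (low
 * flag bits masked off); __pci_mmap_make_offset() below translates it
 * into a physical page offset.
 */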
static __inline__ int
__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
		       enum pci_mmap_state mmap_state)
{
	struct pci_controller *pci_ctrl = (struct pci_controller *)dev->sysdata;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long io_offset = 0;
	int i, res_bit;

	if (pci_ctrl == 0)
		return -EINVAL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)pci_ctrl->io_space.base;
		offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			offset += pci_ctrl->io_space.start - io_offset;
		vma->vm_pgoff = offset >> PAGE_SHIFT;
		return 0;
	}

	return -EINVAL;
}
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static __inline__ void
__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
		      enum pci_mmap_state mmap_state, int write_combine)
{
	int prot = pgprot_val(vma->vm_page_prot);

	/* Set to write-through */
	prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
#if 0
	if (!write_combine)
		prot |= _PAGE_WRITETHRU;
#endif
	vma->vm_page_prot = __pgprot(prot);
}
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int ret;

	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);

	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);

	return ret;
}