/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>

#include "dma.h"

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char *)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);
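
/*
 * Example (a hypothetical driver sketch, not part of this file): map a
 * device's registers with ioremap() and release the mapping with iounmap().
 * REG_BASE, REG_SIZE and REG_STATUS are made-up names for illustration.
 *
 *	void __iomem *regs = ioremap(REG_BASE, REG_SIZE);
 *	u32 status;
 *
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 */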

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL)
		name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL)
			return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof(struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 * Claim a range of I/O virtual addresses and map the physical range there.
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE - 1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL) ? res->name : "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE - 1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}
	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	free_pages(va, order);
err_nomem:
	kfree(res);
err_nopages:
	return NULL;
}

void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE - 1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE - 1) & PAGE_MASK;
	if ((res->end - res->start) + 1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end - res->start) + 1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
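
/*
 * Example (a hypothetical driver sketch, not part of this file): allocate a
 * DMA-consistent control block for an SBus device and free it again. "op"
 * is assumed to be the driver's struct of_device; "cb" and CB_BYTES are
 * made-up names for illustration.
 *
 *	u32 dvma;
 *	void *cb = sbus_alloc_consistent(&op->dev, CB_BYTES, &dvma);
 *
 *	if (cb == NULL)
 *		return -ENOMEM;
 *	... give "dvma" to the device; the CPU may use "cb" without flushing ...
 *	sbus_free_consistent(&op->dev, CB_BYTES, cb, dvma);
 */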

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
	mmu_release_scsi_one(dev, ba, n);
}

int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}
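
/*
 * Example (a hypothetical driver sketch, not part of this file): stream an
 * existing kernel buffer to an SBus device through the IOMMU. "op", "buf"
 * and "len" are assumed to exist in the caller; DMA_TO_DEVICE stands in
 * for the caller's direction constant (the direction argument is not used
 * on this path).
 *
 *	dma_addr_t ba = sbus_map_single(&op->dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ba == 0)
 *		return -EBUSY;
 *	... point the device at "ba" and run the transfer ...
 *	sbus_unmap_single(&op->dev, ba, len, DMA_TO_DEVICE);
 */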

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}
	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total >> PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va);	/* equals virt_to_bus (R.I.P.) for us. */
	return (void *)res->start;
}
EXPORT_SYMBOL(pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE - 1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE - 1) & PAGE_MASK;
	if ((res->end - res->start) + 1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end - res->start) + 1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
EXPORT_SYMBOL(pci_free_consistent);
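
/*
 * Example (a hypothetical driver sketch, not part of this file): allocate a
 * descriptor ring in consistent memory for a PCI device. "pdev" is the
 * driver's struct pci_dev; RING_BYTES is a made-up size for illustration.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
 *
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... program "ring_dma" into the device, touch "ring" from the CPU ...
 *	pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);
 */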

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}
EXPORT_SYMBOL(pci_map_single);

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE - 1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_unmap_single);
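
/*
 * Example (a hypothetical driver sketch, not part of this file): stream a
 * receive buffer to the device and read it back on the CPU afterwards.
 * "pdev", "skb_data" and "buf_len" are assumed to exist in the caller.
 *
 *	dma_addr_t ba = pci_map_single(pdev, skb_data, buf_len,
 *	    PCI_DMA_FROMDEVICE);
 *
 *	... device DMAs into the buffer ...
 *	pci_unmap_single(pdev, ba, buf_len, PCI_DMA_FROMDEVICE);
 *	... CPU reads of skb_data now see what the device wrote ...
 */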

/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
    unsigned long offset, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}
EXPORT_SYMBOL(pci_map_page);

void pci_unmap_page(struct pci_dev *hwdev,
    dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}
EXPORT_SYMBOL(pci_unmap_page);

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
EXPORT_SYMBOL(pci_map_sg);

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE - 1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_unmap_sg);
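
/*
 * Example (a hypothetical driver sketch, not part of this file): map a
 * scatterlist for device reads and hand each segment's bus address to the
 * hardware. "pdev", "sgl" and "nents" are assumed to come from the caller.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
 *	for_each_sg(sgl, sg, count, i) {
 *		... feed sg_dma_address(sg) / sg_dma_len(sg) to the device ...
 *	}
 *	pci_unmap_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
 */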

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE - 1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE - 1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_device);
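
/*
 * Example (a hypothetical driver sketch, not part of this file): peek at a
 * long-lived streaming buffer between transfers without unmapping it.
 * "pdev", "ba", "buf" and "len" are assumed to exist in the caller.
 *
 *	pci_dma_sync_single_for_cpu(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *	... the CPU may now inspect "buf" ...
 *	pci_dma_sync_single_for_device(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *	... the device owns the buffer again ...
 */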

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE - 1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE - 1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_device);

#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == NULL)
			nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return p - buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != NULL; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}