/*
 * ioport.c: Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables? It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#include <asm/leon.h>

const struct sparc32_dma_ops *sparc32_dma_ops;

/* This function must make sure that caches and memory are coherent after DMA.
 * On LEON systems without cache snooping it flushes the entire D-CACHE.
 */
static inline void dma_make_coherent(unsigned long pa, unsigned long len)
{
	if (sparc_cpu_model == sparc_leon) {
		if (!sparc_leon3_snooping_enabled())
			leon_flush_dcache_all();
	}
}

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
		unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, usable from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN	15
#define XNRES	10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	/*
	 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
	 * This probably warrants some sort of hashing.
	 */
	if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char *)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);
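
/*
 * Illustrative pairing (sketch only; the physical base and the REG_CTRL
 * register offset are hypothetical, not part of this file):
 *
 *	void __iomem *regs = ioremap(0xf0200000, 0x1000);
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);	// kick the hypothetical device
 *	iounmap(regs);
 */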

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
		unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL)
		name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL)
			return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof(struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE - 1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL) ? res->name : "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	srmmu_mapiorange(bus, pa, res->start, resource_size(res));

	return (void __iomem *)(unsigned long)(res->start + offset);
}
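
/*
 * Worked example (numbers made up): for pa == 0x40012345 and sz == 0x100,
 * offset is 0x345, so allocate_resource() reserves one whole page of I/O
 * virtual space, srmmu_mapiorange() maps the page at pa & PAGE_MASK there,
 * and the returned cookie is the reserved page's start plus 0x345.
 */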

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = resource_size(res);
	BUG_ON((plen & (PAGE_SIZE - 1)) != 0);
	srmmu_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp,
				 struct dma_attrs *attrs)
{
	struct platform_device *op = to_platform_device(dev);
	unsigned long len_total = PAGE_ALIGN(len);
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}
	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}

	// XXX The sbus_map_dma_area does this for us below, see comments.
	// srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->dev.of_node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);
err_nopages:
	return NULL;
}

static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba, struct dma_attrs *attrs)
{
	struct resource *res;
	struct page *pgv;

	if ((res = lookup_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE - 1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = PAGE_ALIGN(n);
	if (resource_size(res) != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
		    (long)resource_size(res), n);
		return;
	}

	release_resource(res);
	kfree(res);

	pgv = virt_to_page(p);
	sbus_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
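
/*
 * Note: drivers never call sbus_alloc_coherent()/sbus_free_coherent()
 * directly; dma_alloc_coherent() dispatches here through the dma_map_ops
 * table (sbus_dma_ops, installed as dma_ops below). A hedged sketch of
 * the driver side, with "op" an SBus platform_device and the size made up:
 *
 *	void *cpu = dma_alloc_coherent(&op->dev, PAGE_SIZE, &dvma, GFP_KERNEL);
 *	if (cpu == NULL)
 *		return -ENOMEM;
 *	// ... use the buffer for control blocks ...
 *	dma_free_coherent(&op->dev, PAGE_SIZE, cpu, dvma);
 */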

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}

static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);
	return n;
}

static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}

struct dma_map_ops sbus_dma_ops = {
	.alloc			= sbus_alloc_coherent,
	.free			= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp,
				  struct dma_attrs *attrs)
{
	unsigned long len_total = PAGE_ALIGN(len);
	void *va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}
	order = get_order(len_total);
	va = (void *) __get_free_pages(GFP_KERNEL, order);
	if (va == NULL) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total >> PAGE_SHIFT);
		goto err_nopages;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		printk("pci_alloc_consistent: no core\n");
		goto err_nomem;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;

err_nova:
	kfree(res);
err_nomem:
	free_pages((unsigned long)va, order);
err_nopages:
	return NULL;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba, struct dma_attrs *attrs)
{
	struct resource *res;

	if ((res = lookup_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE - 1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = PAGE_ALIGN(n);
	if (resource_size(res) != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)resource_size(res), (long)n);
		return;
	}

	dma_make_coherent(ba, n);
	srmmu_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);
	free_pages((unsigned long)phys_to_virt(ba), get_order(n));
}
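
/*
 * Illustrative pairing (sketch; pdev and the 4096 size are hypothetical):
 * the size handed to the free must equal the size used at allocation time,
 * since the resource_size() check above rejects a mismatched region:
 *
 *	void *cpu = dma_alloc_coherent(&pdev->dev, 4096, &handle, GFP_KERNEL);
 *	// ...
 *	dma_free_coherent(&pdev->dev, 4096, cpu, handle);
 */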

/*
 * Same as pci_map_single, but with pages.
 */
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
			     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (dir != PCI_DMA_TODEVICE)
		dma_make_coherent(ba, PAGE_ALIGN(size));
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		sg->dma_address = sg_phys(sg);
		sg->dma_length = sg->length;
	}
	return nents;
}
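
/*
 * Illustrative scatter-gather usage from the driver side (sketch only;
 * buffers, lengths and pdev are hypothetical, error handling elided):
 *
 *	struct scatterlist sgl[2];
 *	int i, mapped;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	mapped = dma_map_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 *	for (i = 0; i < mapped; i++)
 *		; // program sg_dma_address(&sgl[i]), sg_dma_len(&sgl[i])
 *	dma_unmap_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 */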

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
		}
	}
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		dma_make_coherent(ba, PAGE_ALIGN(size));
	}
}

static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		dma_make_coherent(ba, PAGE_ALIGN(size));
	}
}
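
/*
 * Illustrative ownership handoff for a receive buffer (sketch; dev, buf
 * and len are hypothetical):
 *
 *	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	// ... device DMAs into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// CPU may now read buf
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// device owns the buffer again
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */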

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
		}
	}
}

static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
		}
	}
}

struct dma_map_ops pci32_dma_ops = {
	.alloc			= pci32_alloc_coherent,
	.free			= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.unmap_page		= pci32_unmap_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);

/* leon re-uses pci32_dma_ops */
struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
EXPORT_SYMBOL(leon_dma_ops);

struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);

/*
 * Return whether the given PCI device DMA address mask can be
 * supported properly.  For example, if your device can only drive the
 * low 24-bits during PCI bus mastering, then you would pass
 * 0x00ffffff as the mask to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(dma_supported);
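
/*
 * Illustrative mask negotiation from a driver's probe() (sketch; the
 * 24-bit limit belongs to the hypothetical device, not this platform):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
 *		return -EIO;	// platform cannot satisfy the mask
 */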

#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private, *r;
	const char *nm;

	for (r = root->child; r != NULL; r = r->sibling) {
		if ((nm = r->name) == NULL)
			nm = "???";
		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}

static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sparc_io_proc_show, PDE(inode)->data);
}

static const struct file_operations sparc_io_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sparc_io_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
	proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}