#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H
/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 * These may (will) be platform-specific functions.
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 * For read{b,w,l} and write{b,w,l} there are also __raw versions, which
 * do not have a memory barrier after them.
 *
 * In addition, we have
 *   ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O,
 *   which are processor specific.
 */
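
/*
 * A minimal sketch of the convention above (purely illustrative; the base
 * pointer and port number below are made up and not part of this header):
 *
 *	u32 v = readl(base + 0x10);	- MMIO read, with barrier
 *	__raw_writel(v, base + 0x10);	- MMIO write, no barrier
 *	u8  s = inb(0x1f7);		- port (ISA-style) read
 */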
/*
 * We follow the Alpha convention here:
 *  __inb expands to an inline function call (which calls via the mv)
 *  _inb  is a real function call (note ___raw fns are _ version of __raw)
 *  inb   by default expands to _inb, but the machine specific code may
 *        define it to __inb if it chooses.
 */
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>

#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __FUNCTION__, __LINE__, (port), (u32)__builtin_return_address(0))
/*
 * Since boards are able to define their own set of I/O routines through
 * their respective machine vector, we always wrap through the mv.
 *
 * Also, in the event that a board hasn't provided its own definition for
 * a given routine, it will be wrapped to generic code at run-time.
 */
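
/*
 * For illustration only: a board needing special port I/O might fill in
 * just the routines it cares about in its machine vector and fall back to
 * the generic versions for the rest. The board, function and field values
 * below are hypothetical:
 *
 *	static struct sh_machine_vector mv_myboard __initmv = {
 *		.mv_name	= "myboard",
 *		.mv_inb		= myboard_inb,
 *		.mv_outb	= myboard_outb,
 *	};
 */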
#define __inb(p)	sh_mv.mv_inb((p))
#define __inw(p)	sh_mv.mv_inw((p))
#define __inl(p)	sh_mv.mv_inl((p))
#define __outb(x,p)	sh_mv.mv_outb((x),(p))
#define __outw(x,p)	sh_mv.mv_outw((x),(p))
#define __outl(x,p)	sh_mv.mv_outl((x),(p))

#define __inb_p(p)	sh_mv.mv_inb_p((p))
#define __inw_p(p)	sh_mv.mv_inw_p((p))
#define __inl_p(p)	sh_mv.mv_inl_p((p))
#define __outb_p(x,p)	sh_mv.mv_outb_p((x),(p))
#define __outw_p(x,p)	sh_mv.mv_outw_p((x),(p))
#define __outl_p(x,p)	sh_mv.mv_outl_p((x),(p))

#define __insb(p,b,c)	sh_mv.mv_insb((p), (b), (c))
#define __insw(p,b,c)	sh_mv.mv_insw((p), (b), (c))
#define __insl(p,b,c)	sh_mv.mv_insl((p), (b), (c))
#define __outsb(p,b,c)	sh_mv.mv_outsb((p), (b), (c))
#define __outsw(p,b,c)	sh_mv.mv_outsw((p), (b), (c))
#define __outsl(p,b,c)	sh_mv.mv_outsl((p), (b), (c))

#define __readb(a)	sh_mv.mv_readb((a))
#define __readw(a)	sh_mv.mv_readw((a))
#define __readl(a)	sh_mv.mv_readl((a))
#define __writeb(v,a)	sh_mv.mv_writeb((v),(a))
#define __writew(v,a)	sh_mv.mv_writew((v),(a))
#define __writel(v,a)	sh_mv.mv_writel((v),(a))

#define inb		__inb
#define inw		__inw
#define inl		__inl
#define outb		__outb
#define outw		__outw
#define outl		__outl

#define inb_p		__inb_p
#define inw_p		__inw_p
#define inl_p		__inl_p
#define outb_p		__outb_p
#define outw_p		__outw_p
#define outl_p		__outl_p

#define insb		__insb
#define insw		__insw
#define insl		__insl
#define outsb		__outsb
#define outsw		__outsw
#define outsl		__outsl

#define __raw_readb(a)		__readb((void __iomem *)(a))
#define __raw_readw(a)		__readw((void __iomem *)(a))
#define __raw_readl(a)		__readl((void __iomem *)(a))
#define __raw_writeb(v, a)	__writeb(v, (void __iomem *)(a))
#define __raw_writew(v, a)	__writew(v, (void __iomem *)(a))
#define __raw_writel(v, a)	__writel(v, (void __iomem *)(a))
void __raw_writesl(unsigned long addr, const void *data, int longlen);
void __raw_readsl(unsigned long addr, void *data, int longlen);
/*
 * The platform header files may define some of these macros to use
 * the inlined versions where appropriate.  These macros may also be
 * redefined by userlevel programs.
 */
#ifdef __raw_readb
# define readb(a)	({ unsigned int r_ = __raw_readb(a); mb(); r_; })
#endif
#ifdef __raw_readw
# define readw(a)	({ unsigned int r_ = __raw_readw(a); mb(); r_; })
#endif
#ifdef __raw_readl
# define readl(a)	({ unsigned int r_ = __raw_readl(a); mb(); r_; })
#endif

#ifdef __raw_writeb
# define writeb(v,a)	({ __raw_writeb((v),(a)); mb(); })
#endif
#ifdef __raw_writew
# define writew(v,a)	({ __raw_writew((v),(a)); mb(); })
#endif
#ifdef __raw_writel
# define writel(v,a)	({ __raw_writel((v),(a)); mb(); })
#endif
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)
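
/*
 * The expansions above provide reads{b,w}()/writes{b,w}(): "count" repeated
 * accesses to a single MMIO location, typically used to drain or fill a
 * device FIFO. A minimal usage sketch (the device and register name are
 * made up for illustration, not part of this header):
 *
 *	u16 buf[64];
 *	readsw(dev->regs + FIFO_DATA, buf, 64);
 *	writesw(dev->regs + FIFO_DATA, buf, 64);
 */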
#define writesl	__raw_writesl
#define readsl	__raw_readsl
#define readb_relaxed(a) readb(a)
#define readw_relaxed(a) readw(a)
#define readl_relaxed(a) readl(a)
/* Simple MMIO */
#define ioread8(a)		readb(a)
#define ioread16(a)		readw(a)
#define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
#define ioread32(a)		readl(a)
#define ioread32be(a)		be32_to_cpu(__raw_readl((a)))
#define iowrite8(v,a)		writeb((v),(a))
#define iowrite16(v,a)		writew((v),(a))
#define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)		writel((v),(a))
#define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))
#define ioread8_rep(a, d, c)	readsb((a), (d), (c))
#define ioread16_rep(a, d, c)	readsw((a), (d), (c))
#define ioread32_rep(a, d, c)	readsl((a), (d), (c))
#define iowrite8_rep(a, s, c)	writesb((a), (s), (c))
#define iowrite16_rep(a, s, c)	writesw((a), (s), (c))
#define iowrite32_rep(a, s, c)	writesl((a), (s), (c))
#define mmiowb()	wmb()	/* synco on SH-4A, otherwise a nop */
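
/*
 * A common pattern for mmiowb(): order MMIO writes done while holding a
 * lock ahead of MMIO writes issued by the next CPU to take that lock.
 * Illustrative sketch only (the lock and register are hypothetical):
 *
 *	spin_lock(&dev->lock);
 *	writel(val, dev->regs + REG_CTRL);
 *	mmiowb();
 *	spin_unlock(&dev->lock);
 */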
2005-04-16 15:20:36 -07:00
2007-11-21 18:06:34 +09:00
# define IO_SPACE_LIMIT 0xffffffff
2005-04-16 15:20:36 -07:00
/*
 * This function provides a method for the generic case where a
 * board-specific ioport_map simply needs to return the port + some
 * arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	extern unsigned long generic_io_base;

	generic_io_base = pbase;
}
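
/*
 * A board's setup code would typically call this once, before any port I/O
 * is attempted. Hypothetical example (the setup function and base address
 * are illustrative only):
 *
 *	static void __init myboard_setup(char **cmdline_p)
 *	{
 *		__set_io_port_base(0xb8000000);
 *	}
 */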
#define __ioport_map(p, n)	sh_mv.mv_ioport_map((p), (n))
/* We really want to try and get these to memcpy etc */
extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long);
extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
extern void memset_io(volatile void __iomem *, int, unsigned long);
/* SuperH on-chip I/O functions */
static inline unsigned char ctrl_inb(unsigned long addr)
{
	return *(volatile unsigned char*)addr;
}

static inline unsigned short ctrl_inw(unsigned long addr)
{
	return *(volatile unsigned short*)addr;
}

static inline unsigned int ctrl_inl(unsigned long addr)
{
	return *(volatile unsigned long*)addr;
}

static inline unsigned long long ctrl_inq(unsigned long addr)
{
	return *(volatile unsigned long long*)addr;
}

static inline void ctrl_outb(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char*)addr = b;
}

static inline void ctrl_outw(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short*)addr = b;
}

static inline void ctrl_outl(unsigned int b, unsigned long addr)
{
	*(volatile unsigned long*)addr = b;
}

static inline void ctrl_outq(unsigned long long b, unsigned long addr)
{
	*(volatile unsigned long long*)addr = b;
}

static inline void ctrl_delay(void)
{
#ifdef P2SEG
	ctrl_inw(P2SEG);
#endif
}
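
/*
 * The ctrl_*() accessors are typically used for on-chip peripheral
 * registers at fixed addresses, optionally followed by ctrl_delay() to
 * force the write out with a dummy read. Illustrative sketch only (MYREG
 * is a made-up register address, not part of this header):
 *
 *	ctrl_outw(0x0001, MYREG);
 *	ctrl_delay();
 */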
/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);
#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif
/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap(unsigned long offset, unsigned long size,
			unsigned long flags);
void __iounmap(void __iomem *addr);

/* arch/sh/mm/ioremap_64.c */
unsigned long onchip_remap(unsigned long addr, unsigned long size,
			   const char *name);
extern void onchip_unmap(unsigned long vaddr);
#else
#define __ioremap(offset, size, flags)	((void __iomem *)(offset))
#define __iounmap(addr)			do { } while (0)
#define onchip_remap(addr, size, name)	(addr)
#define onchip_unmap(addr)		do { } while (0)
#endif /* CONFIG_MMU */
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#ifdef CONFIG_SUPERH32
	unsigned long last_addr = offset + size - 1;
#endif
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

#ifdef CONFIG_SUPERH32
	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}
#endif

	return __ioremap(offset, size, flags);
}
#define ioremap(offset, size)			\
	__ioremap_mode((offset), (size), 0)
#define ioremap_nocache(offset, size)		\
	__ioremap_mode((offset), (size), 0)
#define ioremap_cache(offset, size)		\
	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
#define p3_ioremap(offset, size, flags)		\
	__ioremap((offset), (size), (flags))
#define iounmap(addr)				\
	__iounmap((addr))
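
/*
 * Typical driver usage of the ioremap() convention described above. A
 * minimal sketch only; MYDEV_PHYS_BASE and MYDEV_REG_SIZE are hypothetical
 * and error paths are trimmed:
 *
 *	void __iomem *regs = ioremap(MYDEV_PHYS_BASE, MYDEV_REG_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0, regs + 0x04);
 *	...
 *	iounmap(regs);
 */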
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p
#endif /* __KERNEL__ */
#endif /* __ASM_SH_IO_H */