2005-04-16 15:20:36 -07:00
# ifndef __ASM_SH_IO_H
# define __ASM_SH_IO_H
/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 *
 * All ISA I/O routines are wrapped through the machine vector. If a
 * board does not provide overrides, a generic set that are copied in
 * from the default machine vector are used instead. These are largely
 * for old compat code for I/O offsetting to SuperIOs, all of which are
 * better handled through the machvec ioport mapping routines these days.
 */
2010-01-18 21:30:29 +09:00
# include <linux/errno.h>
2005-04-16 15:20:36 -07:00
# include <asm/cache.h>
# include <asm/system.h>
# include <asm/addrspace.h>
# include <asm/machvec.h>
2006-01-16 22:14:15 -08:00
# include <asm/pgtable.h>
# include <asm-generic/iomap.h>
# ifdef __KERNEL__
2005-04-16 15:20:36 -07:00
/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
2006-01-16 22:14:15 -08:00
# define __IO_PREFIX generic
# include <asm/io_generic.h>
2008-02-07 20:18:21 +09:00
# include <asm/io_trapped.h>
2006-01-16 22:14:15 -08:00
2008-10-04 05:25:52 +09:00
# define inb(p) sh_mv.mv_inb((p))
# define inw(p) sh_mv.mv_inw((p))
# define inl(p) sh_mv.mv_inl((p))
# define outb(x,p) sh_mv.mv_outb((x),(p))
# define outw(x,p) sh_mv.mv_outw((x),(p))
# define outl(x,p) sh_mv.mv_outl((x),(p))
# define inb_p(p) sh_mv.mv_inb_p((p))
# define inw_p(p) sh_mv.mv_inw_p((p))
# define inl_p(p) sh_mv.mv_inl_p((p))
# define outb_p(x,p) sh_mv.mv_outb_p((x),(p))
# define outw_p(x,p) sh_mv.mv_outw_p((x),(p))
# define outl_p(x,p) sh_mv.mv_outl_p((x),(p))
# define insb(p,b,c) sh_mv.mv_insb((p), (b), (c))
# define insw(p,b,c) sh_mv.mv_insw((p), (b), (c))
# define insl(p,b,c) sh_mv.mv_insl((p), (b), (c))
# define outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c))
# define outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c))
# define outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c))
# define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
# define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
# define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
# define __raw_writeq(v,a) (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))
# define __raw_readb(a) (__chk_io_ptr(a), *(volatile u8 __force *)(a))
# define __raw_readw(a) (__chk_io_ptr(a), *(volatile u16 __force *)(a))
# define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a))
# define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a))
# define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; })
# define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; })
# define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; })
# define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; })
# define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); })
# define writew(v,a) ({ __raw_writew((v),(a)); mb(); })
# define writel(v,a) ({ __raw_writel((v),(a)); mb(); })
# define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); })
2005-04-16 15:20:36 -07:00
2010-01-26 13:02:10 +09:00
/*
 * Legacy SuperH on-chip I/O functions
 *
 * These are all deprecated, all new (and especially cross-platform) code
 * should be using the __raw_xxx() routines directly.
 *
 * Each one is a straight pass-through to the corresponding __raw
 * accessor (no barriers), kept only for old callers and tagged
 * __deprecated so new uses warn at build time.
 */
static inline u8 __deprecated ctrl_inb(unsigned long addr)
{
	return __raw_readb(addr);
}

static inline u16 __deprecated ctrl_inw(unsigned long addr)
{
	return __raw_readw(addr);
}

static inline u32 __deprecated ctrl_inl(unsigned long addr)
{
	return __raw_readl(addr);
}

static inline u64 __deprecated ctrl_inq(unsigned long addr)
{
	return __raw_readq(addr);
}

static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
{
	__raw_writeb(v, addr);
}

static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
{
	__raw_writew(v, addr);
}

static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
{
	__raw_writel(v, addr);
}

static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
{
	__raw_writeq(v, addr);
}
2008-10-01 15:12:27 +09:00
2009-11-12 16:36:26 +09:00
/* Base offset applied by the generic ioport_map; set via __set_io_port_base(). */
extern unsigned long generic_io_base;

/*
 * I/O delay: perform a dummy 16-bit read of the generic I/O base and
 * discard the result.  The access itself provides the settle time.
 */
static inline void ctrl_delay(void)
{
	__raw_readw(generic_io_base);
}
2005-04-16 15:20:36 -07:00
2010-02-18 13:23:30 +09:00
/*
 * Generate {read,write}{b,w,l,q}_uncached() accessors.  Each performs
 * the access while executing from the uncached mirror, bracketed by
 * jump_to_uncached()/back_to_cached().
 */
#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
2007-09-10 12:08:42 +09:00
/*
 * Generate __raw_{writes,reads}{b,w,l,q}() string accessors: 'count'
 * repeated same-width accesses to a single MMIO location 'mem',
 * copying from/to the memory buffer 'addr'.  No barriers are implied.
 */
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void __raw_writes##bwlq(volatile void __iomem *mem,	\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void __raw_reads##bwlq(volatile void __iomem *mem,	\
			       void *addr, unsigned int count)		\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)

#ifdef CONFIG_SUPERH32
/* sh32 provides out-of-line optimized 32-bit string ops instead. */
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(l, u32)
#endif

__BUILD_MEMORY_STRING(q, u64)
2008-10-01 15:12:27 +09:00
2008-10-04 05:25:52 +09:00
/* Canonical names for the string MMIO helpers (no barriers implied). */
#define writesb			__raw_writesb
#define writesw			__raw_writesw
#define writesl			__raw_writesl

#define readsb			__raw_readsb
#define readsw			__raw_readsw
#define readsl			__raw_readsl

/* No weaker ordering is provided: the relaxed forms are fully barriered. */
#define readb_relaxed(a)	readb(a)
#define readw_relaxed(a)	readw(a)
#define readl_relaxed(a)	readl(a)
#define readq_relaxed(a)	readq(a)
2005-04-16 15:20:36 -07:00
2009-08-24 16:10:40 +09:00
#ifndef CONFIG_GENERIC_IOMAP
/* Simple MMIO: iomap cookies are plain MMIO addresses. */
#define ioread8(a)		__raw_readb(a)
#define ioread16(a)		__raw_readw(a)
#define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
#define ioread32(a)		__raw_readl(a)
#define ioread32be(a)		be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a)		__raw_writeb((v),(a))
#define iowrite16(v,a)		__raw_writew((v),(a))
#define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)		__raw_writel((v),(a))
#define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))

/* Repeated iomap accesses map straight onto the string accessors. */
#define ioread8_rep(a, d, c)	__raw_readsb((a), (d), (c))
#define ioread16_rep(a, d, c)	__raw_readsw((a), (d), (c))
#define ioread32_rep(a, d, c)	__raw_readsl((a), (d), (c))

#define iowrite8_rep(a, s, c)	__raw_writesb((a), (s), (c))
#define iowrite16_rep(a, s, c)	__raw_writesw((a), (s), (c))
#define iowrite32_rep(a, s, c)	__raw_writesl((a), (s), (c))
#endif

/* Legacy mmio_* aliases for the string accessors. */
#define mmio_insb(p,d,c)	__raw_readsb(p,d,c)
#define mmio_insw(p,d,c)	__raw_readsw(p,d,c)
#define mmio_insl(p,d,c)	__raw_readsl(p,d,c)

#define mmio_outsb(p,s,c)	__raw_writesb(p,s,c)
#define mmio_outsw(p,s,c)	__raw_writesw(p,s,c)
#define mmio_outsl(p,s,c)	__raw_writesl(p,s,c)

/* synco on SH-4A, otherwise a nop */
#define mmiowb()		wmb()
2005-04-16 15:20:36 -07:00
2007-11-21 18:06:34 +09:00
#define IO_SPACE_LIMIT 0xffffffff

/*
 * This function provides a method for the generic case where a
 * board-specific ioport_map simply needs to return the port + some
 * arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	generic_io_base = pbase;
}

/* ioport mapping is likewise routed through the machine vector. */
#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))
2005-04-16 15:20:36 -07:00
/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);
2007-11-09 12:58:12 +09:00
/* Without an MMU the virtual and physical address spaces coincide. */
#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif
2005-04-16 15:20:36 -07:00
/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
/* Page-table based implementation; see arch/sh/mm/ioremap.c. */
void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

/*
 * Page-table backed mapping, recording the immediate caller for
 * bookkeeping via __builtin_return_address(0).
 */
static inline void __iomem *
__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
2006-01-16 22:14:15 -08:00
/*
 * Fast path for legacy 29-bit mode: translate the physical address into
 * a fixed segment without touching the page tables.  Returns NULL when
 * a real mapping is required (or when not built for 29-bit mode).
 */
static inline void __iomem *
__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	unsigned long last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}
/*
 * Resolve an ioremap() request by trying each strategy in turn:
 * trapped-I/O emulation first, then the 29-bit segment fast path,
 * and finally a real page-table mapping via __ioremap().
 */
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
{
	void __iomem *mapping;

	mapping = __ioremap_trapped(offset, size);
	if (!mapping)
		mapping = __ioremap_29bit(offset, size, prot);
	if (!mapping)
		mapping = __ioremap(offset, size, prot);

	return mapping;
}
2009-04-30 12:56:37 +09:00
#else
/*
 * No MMU: physical addresses are directly usable, so ioremap() is an
 * identity transform and iounmap() is a no-op.
 */
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */
2005-04-16 15:20:36 -07:00
2010-01-19 13:34:38 +09:00
/* Default ioremap(): always an uncached mapping. */
static inline void __iomem *
ioremap(unsigned long offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

/* Cached variant for memory-like regions. */
static inline void __iomem *
ioremap_cache(unsigned long offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}
2010-01-19 14:00:14 +09:00
# ifdef CONFIG_HAVE_IOREMAP_PROT
2010-01-19 13:34:38 +09:00
static inline void __iomem *
ioremap_prot ( resource_size_t offset , unsigned long size , unsigned long flags )
{
return __ioremap_mode ( offset , size , __pgprot ( flags ) ) ;
}
2010-01-19 14:00:14 +09:00
# endif
2010-01-19 13:34:38 +09:00
2010-01-28 18:17:29 +09:00
#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
				   unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
/*
 * Stubs for !CONFIG_IOREMAP_FIXED: ioremap_fixed() must never be
 * reached (hence the BUG()); init is a no-op and unmap reports -EINVAL.
 */
static inline void __iomem *
ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
	      unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif
2010-01-19 13:34:38 +09:00
#define ioremap_nocache	ioremap
#define iounmap		__iounmap

/* Complain loudly about bogus PC-style port I/O, including caller info. */
#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __func__, __LINE__, (port), (u32)__builtin_return_address(0))
2005-04-16 15:20:36 -07:00
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

/* Range checks used by the /dev/mem driver. */
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
2005-04-16 15:20:36 -07:00
# endif /* __KERNEL__ */
# endif /* __ASM_SH_IO_H */