2005-04-16 15:20:36 -07:00
# ifndef __ASM_SH_IO_H
# define __ASM_SH_IO_H
/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 *
 * All ISA I/O routines are wrapped through the machine vector. If a
 * board does not provide overrides, a generic set that are copied in
 * from the default machine vector are used instead. These are largely
 * for old compat code for I/O offsetting to SuperIOs, all of which are
 * better handled through the machvec ioport mapping routines these days.
 */
# include <asm/cache.h>
# include <asm/system.h>
# include <asm/addrspace.h>
# include <asm/machvec.h>
2006-01-16 22:14:15 -08:00
# include <asm/pgtable.h>
# include <asm-generic/iomap.h>
# ifdef __KERNEL__
2005-04-16 15:20:36 -07:00
/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
2006-01-16 22:14:15 -08:00
# define __IO_PREFIX generic
# include <asm/io_generic.h>
2008-02-07 20:18:21 +09:00
# include <asm/io_trapped.h>
2006-01-16 22:14:15 -08:00
2008-10-04 05:25:52 +09:00
# define inb(p) sh_mv.mv_inb((p))
# define inw(p) sh_mv.mv_inw((p))
# define inl(p) sh_mv.mv_inl((p))
# define outb(x,p) sh_mv.mv_outb((x),(p))
# define outw(x,p) sh_mv.mv_outw((x),(p))
# define outl(x,p) sh_mv.mv_outl((x),(p))
# define inb_p(p) sh_mv.mv_inb_p((p))
# define inw_p(p) sh_mv.mv_inw_p((p))
# define inl_p(p) sh_mv.mv_inl_p((p))
# define outb_p(x,p) sh_mv.mv_outb_p((x),(p))
# define outw_p(x,p) sh_mv.mv_outw_p((x),(p))
# define outl_p(x,p) sh_mv.mv_outl_p((x),(p))
# define insb(p,b,c) sh_mv.mv_insb((p), (b), (c))
# define insw(p,b,c) sh_mv.mv_insw((p), (b), (c))
# define insl(p,b,c) sh_mv.mv_insl((p), (b), (c))
# define outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c))
# define outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c))
# define outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c))
# define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
# define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
# define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
# define __raw_writeq(v,a) (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))
# define __raw_readb(a) (__chk_io_ptr(a), *(volatile u8 __force *)(a))
# define __raw_readw(a) (__chk_io_ptr(a), *(volatile u16 __force *)(a))
# define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a))
# define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a))
# define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; })
# define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; })
# define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; })
# define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; })
# define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); })
# define writew(v,a) ({ __raw_writew((v),(a)); mb(); })
# define writel(v,a) ({ __raw_writel((v),(a)); mb(); })
# define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); })
2005-04-16 15:20:36 -07:00
2008-10-04 05:25:52 +09:00
/* SuperH on-chip I/O functions */
# define ctrl_inb __raw_readb
# define ctrl_inw __raw_readw
# define ctrl_inl __raw_readl
# define ctrl_inq __raw_readq
2006-09-27 18:25:24 +09:00
2008-10-04 05:25:52 +09:00
# define ctrl_outb __raw_writeb
# define ctrl_outw __raw_writew
# define ctrl_outl __raw_writel
# define ctrl_outq __raw_writeq
2008-10-01 15:12:27 +09:00
2008-10-04 05:25:52 +09:00
/*
 * ctrl_delay - dummy read from the uncached P2 segment.
 *
 * Issues a single __raw_readw() of P2SEG; on parts that do not define a
 * P2 segment this compiles to nothing. NOTE(review): presumably used to
 * drain posted writes to on-chip peripherals — confirm against the SH
 * core documentation.
 */
static inline void ctrl_delay(void)
{
#ifdef P2SEG
	(void)__raw_readw(P2SEG);
#endif
}
2005-04-16 15:20:36 -07:00
2007-09-10 12:08:42 +09:00
/*
 * __BUILD_MEMORY_STRING - generate repeated ("string") MMIO accessors.
 *
 * Expands to __raw_writes##bwlq() and __raw_reads##bwlq(), which copy
 * @count elements of @type between a buffer at @addr and the single
 * fixed I/O location @mem, using the non-barriered __raw_{read,write}
 * accessors.
 */
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void __raw_writes##bwlq(volatile void __iomem *mem,	\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *buf = addr;				\
									\
	for (; count != 0; count--, buf++)				\
		__raw_write##bwlq(*buf, mem);				\
}									\
									\
static inline void __raw_reads##bwlq(volatile void __iomem *mem,	\
				     void *addr, unsigned int count)	\
{									\
	volatile type *buf = addr;					\
									\
	for (; count != 0; count--, buf++)				\
		*buf = __raw_read##bwlq(mem);				\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)
__BUILD_MEMORY_STRING(q, u64)

/* The 32-bit variants are provided out of line. */
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
2008-10-01 15:12:27 +09:00
2008-10-04 05:25:52 +09:00
# define writesb __raw_writesb
# define writesw __raw_writesw
# define writesl __raw_writesl
2008-10-01 15:12:27 +09:00
2008-10-04 05:25:52 +09:00
# define readsb __raw_readsb
# define readsw __raw_readsw
# define readsl __raw_readsl
2006-09-27 18:25:24 +09:00
2008-10-04 05:25:52 +09:00
# define readb_relaxed(a) readb(a)
# define readw_relaxed(a) readw(a)
# define readl_relaxed(a) readl(a)
# define readq_relaxed(a) readq(a)
2005-04-16 15:20:36 -07:00
2006-01-16 22:14:15 -08:00
/* Simple MMIO */
2008-10-01 15:12:27 +09:00
# define ioread8(a) __raw_readb(a)
# define ioread16(a) __raw_readw(a)
2006-01-16 22:14:15 -08:00
# define ioread16be(a) be16_to_cpu(__raw_readw((a)))
2008-10-01 15:12:27 +09:00
# define ioread32(a) __raw_readl(a)
2006-01-16 22:14:15 -08:00
# define ioread32be(a) be32_to_cpu(__raw_readl((a)))
2005-04-16 15:20:36 -07:00
2008-10-01 15:12:27 +09:00
# define iowrite8(v,a) __raw_writeb((v),(a))
# define iowrite16(v,a) __raw_writew((v),(a))
2006-01-16 22:14:15 -08:00
# define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a))
2008-10-01 15:12:27 +09:00
# define iowrite32(v,a) __raw_writel((v),(a))
2006-01-16 22:14:15 -08:00
# define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a))
2008-10-01 15:12:27 +09:00
# define ioread8_rep(a, d, c) __raw_readsb((a), (d), (c))
# define ioread16_rep(a, d, c) __raw_readsw((a), (d), (c))
# define ioread32_rep(a, d, c) __raw_readsl((a), (d), (c))
2006-01-16 22:14:15 -08:00
2008-10-01 15:12:27 +09:00
# define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c))
# define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c))
# define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c))
2006-01-16 22:14:15 -08:00
2008-10-04 05:25:52 +09:00
/* synco on SH-4A, otherwise a nop */
# define mmiowb() wmb()
2005-04-16 15:20:36 -07:00
2007-11-21 18:06:34 +09:00
# define IO_SPACE_LIMIT 0xffffffff
2008-09-04 18:53:58 +09:00
extern unsigned long generic_io_base ;
2005-04-16 15:20:36 -07:00
/*
 * This function provides a method for the generic case where a
 * board-specific ioport_map simply needs to return the port + some
 * arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
/*
 * __set_io_port_base - record the base added to ISA port numbers.
 * @pbase: arbitrary port base for the generic ioport_map implementation.
 *
 * Stores @pbase in generic_io_base (declared extern above); called once
 * at board setup time so the generic ioport_map can be used.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	generic_io_base = pbase;
}
2008-02-07 20:18:21 +09:00
# define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))
2005-04-16 15:20:36 -07:00
/* We really want to try and get these to memcpy etc */
2008-10-04 05:25:52 +09:00
void memcpy_fromio ( void * , const volatile void __iomem * , unsigned long ) ;
void memcpy_toio ( volatile void __iomem * , const void * , unsigned long ) ;
void memset_io ( volatile void __iomem * , int , unsigned long ) ;
2006-09-27 16:43:28 +09:00
2007-11-20 18:26:28 +09:00
/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q ( unsigned long long addr ) ;
unsigned long long poke_real_address_q ( unsigned long long addr ,
unsigned long long val ) ;
2007-11-09 12:58:12 +09:00
# if !defined(CONFIG_MMU)
# define virt_to_phys(address) ((unsigned long)(address))
# define phys_to_virt(address) ((void *)(address))
2007-11-30 17:52:53 +09:00
# else
2007-11-09 12:58:12 +09:00
# define virt_to_phys(address) (__pa(address))
# define phys_to_virt(address) (__va(address))
2006-09-27 17:25:07 +09:00
# endif
2005-04-16 15:20:36 -07:00
/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
2006-01-16 22:14:15 -08:00
# ifdef CONFIG_MMU
void __iomem * __ioremap ( unsigned long offset , unsigned long size ,
unsigned long flags ) ;
void __iounmap ( void __iomem * addr ) ;
2008-04-25 12:58:40 +09:00
/* arch/sh/mm/ioremap_64.c */
unsigned long onchip_remap ( unsigned long addr , unsigned long size ,
const char * name ) ;
extern void onchip_unmap ( unsigned long vaddr ) ;
2006-01-16 22:14:15 -08:00
# else
# define __ioremap(offset, size, flags) ((void __iomem *)(offset))
# define __iounmap(addr) do { } while (0)
2008-04-25 12:58:40 +09:00
# define onchip_remap(addr, size, name) (addr)
# define onchip_unmap(addr) do { } while (0)
2006-01-16 22:14:15 -08:00
# endif /* CONFIG_MMU */
/*
 * __ioremap_mode - common front end for the ioremap() family.
 * @offset: physical address to map
 * @size:   length of the mapping in bytes
 * @flags:  page protection bits (_PAGE_CACHABLE requests a cached mapping)
 *
 * Resolution order: a trapped (emulated) I/O region if one covers
 * @offset; on 32-bit parts, plain segment arithmetic for addresses that
 * fit entirely in the 29-bit P1/P2 range; otherwise a real page-table
 * mapping via __ioremap().
 */
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#ifdef CONFIG_SUPERH32
	/* Only needed for the 29-bit range check below. */
	unsigned long last_addr = offset + size - 1;
#endif
	void __iomem *ret;

	/* Emulated (trapped) I/O regions take precedence. */
	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

#ifdef CONFIG_SUPERH32
	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);
		return (void __iomem *)P2SEGADDR(offset);
	}
#endif

	return __ioremap(offset, size, flags);
}
2006-01-16 22:14:15 -08:00
# define ioremap(offset, size) \
__ioremap_mode ( ( offset ) , ( size ) , 0 )
# define ioremap_nocache(offset, size) \
__ioremap_mode ( ( offset ) , ( size ) , 0 )
# define ioremap_cache(offset, size) \
__ioremap_mode ( ( offset ) , ( size ) , _PAGE_CACHABLE )
# define p3_ioremap(offset, size, flags) \
__ioremap ( ( offset ) , ( size ) , ( flags ) )
2008-09-12 20:41:05 +09:00
# define ioremap_prot(offset, size, flags) \
__ioremap_mode ( ( offset ) , ( size ) , ( flags ) )
2006-01-16 22:14:15 -08:00
# define iounmap(addr) \
__iounmap ( ( addr ) )
2008-10-04 05:25:52 +09:00
/*
 * maybebadio - complain about a PC-style I/O access with no handler.
 * @port: the offending port number (unsigned long).
 *
 * Logs the callsite (function, line, and return address) so stray
 * ISA-style accesses can be tracked down.
 *
 * Fix: the format string literal had stray spaces embedded in it
 * (" bad ... \n "), corrupting the logged message; restored the
 * intended "bad PC-like io ...\n" literal.
 */
#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __func__, __LINE__, (port), (u32)__builtin_return_address(0))
2005-04-16 15:20:36 -07:00
/*
* Convert a physical pointer to a virtual kernel pointer for / dev / mem
* access
*/
# define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
# define xlate_dev_kmem_ptr(p) p
# endif /* __KERNEL__ */
# endif /* __ASM_SH_IO_H */