/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H
#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
#define __raw_ioswabb(a, x)	(x)
#define __raw_ioswabw(a, x)	(x)
#define __raw_ioswabl(a, x)	(x)
#define __raw_ioswabq(a, x)	(x)
#define ____raw_ioswabq(a, x)	(x)
/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;
/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	*(unsigned long *) &mips_io_port_base = base;
	barrier();
}
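
/*
 * Illustrative sketch (not part of this header): platform setup code
 * typically calls set_io_port_base() once during early boot with the
 * KSEG1 (uncached) address of its ISA/PCI I/O window, e.g.:
 *
 *	set_io_port_base(CKSEG1ADDR(MY_BOARD_IO_PORT_PHYS));
 *
 * MY_BOARD_IO_PORT_PHYS is a hypothetical board-specific constant; per
 * the comment above, the lower 16 bits of the resulting address must
 * be zero.
 */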
/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses
 * for use with I/O ports.
 */
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL
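
/*
 * Illustrative sketch: with these definitions the generic iomap code
 * implements, e.g.,
 *
 *	void __iomem *p = ioport_map(0x3f8, 8);
 *	u8 lsr = ioread8(p + 5);
 *
 * by adding the port number to mips_io_port_base (PIO_OFFSET).  The
 * 16550-style port and offsets are a hypothetical example.
 */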
/*
 * virt_to_phys	-	map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}
/*
 * phys_to_virt	-	map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
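
/*
 * Illustrative sketch: for a kmalloc()ed buffer the two helpers above
 * are inverses of one another, e.g.:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *	WARN_ON(phys_to_virt(pa) != buf);
 *
 * This only holds for directly-mapped kernel addresses, never for
 * vmalloc() or ioremap() ranges.
 */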
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512 MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long) CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}
/*
 * ioremap     -	map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache     -	map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache
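
/*
 * Usage sketch (illustrative only): a driver maps a device register
 * window, accesses it with the mmio helpers and unmaps it when done:
 *
 *	void __iomem *regs = ioremap(dev_phys_base, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);
 *	writel(status | BIT(0), regs + 0x14);
 *	iounmap(regs);
 *
 * dev_phys_base and the register offsets are hypothetical; real drivers
 * take them from their bus/platform resources.
 */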
/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.	 Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
#define ioremap_cache ioremap_cachable
/*
 * ioremap_wc     -	map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where it may vastly
 * improve communication performance.  If it was determined at boot that
 * the CPU CCA doesn't support UCA, this method falls back to the
 * _CACHE_UNCACHED option (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}
/*
 * On Octeon and Loongson-3 (with CONFIG_LOONGSON3_ENHANCEMENT) the
 * store fill / write buffer can reorder memory accesses around I/O,
 * so a full write barrier is needed before MMIO and port writes;
 * elsewhere a compiler barrier suffices.
 */
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_io_reorder_wmb();						\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __readq" "\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__mem, __val);				\
}
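
/*
 * Illustrative expansion (not emitted by this header as-is):
 * __BUILD_MEMORY_SINGLE(, l, u32, 1) generates roughly
 *
 *	static inline void writel(u32 val, volatile void __iomem *mem);
 *	static inline u32 readl(const volatile void __iomem *mem);
 *
 * i.e. the pfx/bwlq/type parameters select the accessor name and width,
 * while irq controls whether 64-bit accesses on 32-bit kernels are
 * wrapped in local_irq_save()/local_irq_restore().
 */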
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_io_reorder_wmb();						\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__addr, __val);			\
}
#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type,)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)
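
/*
 * Usage sketch (illustrative): the expansions above provide the classic
 * port accessors, so legacy-style code such as
 *
 *	outb(0x80, 0x70);
 *	u8 val = inb(0x71);
 *
 * compiles into plain loads/stores relative to mips_io_port_base.
 * The 0x70/0x71 ports are a hypothetical RTC-style example.
 */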
#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

#define writeb_relaxed			writeb
#define writew_relaxed			writew
#define writel_relaxed			writel
#define writeq_relaxed			writeq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}
#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}
#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
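
/*
 * Usage sketch (illustrative): draining a FIFO register into a buffer
 * with the string accessors generated above, e.g.
 *
 *	u32 buf[16];
 *	readsl(fifo_reg, buf, ARRAY_SIZE(buf));
 *
 * fifo_reg is a hypothetical ioremap()ed register address; note the
 * device register is *not* incremented between reads.
 */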
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif
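
/*
 * Usage sketch (illustrative): mmiowb() orders an MMIO write against a
 * subsequent spin_unlock() handing the device to another CPU:
 *
 *	spin_lock(&dev_lock);
 *	writel(cmd, regs + CMD_REG);
 *	mmiowb();
 *	spin_unlock(&dev_lock);
 *
 * dev_lock, cmd, regs and CMD_REG are hypothetical names.
 */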
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
/*
 * The caches on some architectures aren't dma-coherent and have need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
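
/*
 * Illustrative note: on a big-endian bus the low-order 32 bits of an
 * 8-byte register live at byte offset 4, so e.g.
 *
 *	u32 v = csr_in32(csr_base + 0x8);
 *
 * actually reads from csr_base + 0x8 + 4 on __MIPSEB__ kernels and from
 * csr_base + 0x8 on little-endian ones.  csr_base is hypothetical.
 */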
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */