/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
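
/*
 * A minimal usage sketch (hypothetical device base and register
 * offsets, for illustration only): a driver maps its registers once
 * and from then on touches them only through the MMIO accessors:
 *
 *	void __iomem *regs = ioremap(FOO_PHYS_BASE, SZ_4K);
 *	u32 status;
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(FOO_CTRL_ENABLE, regs + FOO_CTRL);
 *	status = readl(regs + FOO_STATUS);
 *	iounmap(regs);
 */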

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
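
/*
 * Bring a task's page directory up to date with init_mm's vmalloc/
 * ioremap area: copy the kernel pgd entries covering VMALLOC_START..
 * VMALLOC_END into this mm, retrying until the kernel mapping
 * sequence number (kvm_seq) is observed to be stable.
 */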
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
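/*
 * Illustrative arithmetic: a 1MB mapping's vm_struct comes back from
 * get_vm_area_caller() with size 0x101000 (1MB plus the 4K guard
 * page); size & ~(SZ_1M - 1) trims that back to the 0x100000 bytes
 * actually covered by section entries.
 */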
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		/* step over the two 1MB hardware sections in one 2MB Linux pmd */
		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
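
/*
 * Supersections map 16MB apiece: the same descriptor must be repeated
 * in 16 consecutive page-table entries (8 Linux pmds of two words
 * each), and bits [35:32] of a 36-bit physical address are carried in
 * descriptor bits [23:20].  Illustrative example: for physical address
 * 0x230000000, pfn is 0x230000, so (pfn >> (32 - PAGE_SHIFT)) & 0xf
 * gives 0x2, which is shifted into bits [23:20] below.
 */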
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned: a pfn at or above
	 * 0x100000 corresponds to a physical address beyond 4GB, which
	 * can only be reached through a supersection mapping.
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.  On
	 * a hit, the returned address is the static mapping's virtual
	 * base plus the offset of the requested physical address within
	 * it (illustrative numbers: phys 0x48010000 inside a static
	 * mapping of 0x48000000 at virt 0xfc000000 yields 0xfc010000).
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - remapping it here with
	 * (typically) device or uncached attributes would create an
	 * alias with mismatched memory types, which has unpredictable
	 * results on ARMv6+.
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(pfn);

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
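
/*
 * Indirection hook: __arm_ioremap() goes through this function
 * pointer, so a platform may substitute its own ioremap implementation
 * (see also arch_iounmap below) before drivers start mapping I/O.
 */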
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
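
/*
 * Hypothetical usage sketch (names and sizes are illustrative, not
 * from this file): copy a routine into the executable mapping, e.g.
 * with fncpy(), and call it from there, as SRAM-resident PM code does:
 *
 *	void *sram = (void *)__arm_ioremap_exec(sram_phys, SZ_4K, false);
 *	my_fn = fncpy(sram, &my_sram_fn, MY_SRAM_FN_SZ);
 *	my_fn();
 */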

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);
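
/*
 * Map one 64K window of PCI I/O space into the fixed virtual region at
 * PCI_IO_VIRT_BASE so the port accessors (inb(), outb() and friends)
 * can reach it; offset picks the 64K slot and must stay within
 * IO_SPACE_LIMIT.
 */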
#ifdef CONFIG_PCI
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
#endif