/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
# include <linux/module.h>
# include <linux/errno.h>
# include <linux/mm.h>
# include <linux/vmalloc.h>
2008-08-10 21:08:10 +04:00
# include <asm/cputype.h>
2005-04-17 02:20:36 +04:00
# include <asm/cacheflush.h>
# include <asm/io.h>
2006-06-29 23:17:15 +04:00
# include <asm/mmu_context.h>
# include <asm/pgalloc.h>
2005-04-17 02:20:36 +04:00
# include <asm/tlbflush.h>
2006-06-29 23:17:15 +04:00
# include <asm/sizes.h>
2007-04-21 13:47:29 +04:00
# include <asm/mach/map.h>
# include "mm.h"
/*
 * Used by the ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in the vm_struct->flags field.
 */
# define VM_ARM_SECTION_MAPPING 0x80000000
2005-04-17 02:20:36 +04:00
2006-12-13 17:35:58 +03:00
/*
 * Populate one PTE table's worth of the virtual range [addr, end) with
 * I/O page mappings starting at physical address phys_addr, using the
 * PTE protection bits from 'type'.
 *
 * Returns 0 on success, -ENOMEM if a PTE table cannot be allocated.
 * BUGs if any PTE in the range is already in use - ioremap must never
 * silently overwrite an existing mapping.
 */
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		/* prot_pte_ext carries the extension bits (e.g. for ARMv6
		 * extended page tables) alongside the main PTE value. */
		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
			    type->prot_pte_ext);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}
2006-12-13 17:35:58 +03:00
/*
 * Walk the PMD entries covering [addr, end) under 'pgd', allocating a
 * PMD table if necessary, and hand each PMD-sized chunk to
 * remap_area_pte() for PTE-level mapping.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (either here or
 * propagated from remap_area_pte()).
 */
static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 const struct mem_type *type)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
		if (ret)
			return ret;
		/* advance the physical cursor by the amount just mapped */
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}
2006-12-13 17:35:58 +03:00
/*
 * Map the virtual range [start, start + size) to the physical range
 * beginning at page frame 'pfn' using ordinary 4K page mappings,
 * walking the kernel page tables PGD by PGD.
 *
 * Returns 0 on success or a negative errno propagated from the
 * lower-level helpers.  On failure the partially-built mapping is left
 * in place; the caller (__arm_ioremap_pfn) vunmap()s the area.
 */
static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
2006-06-29 23:17:15 +04:00
/*
 * Re-copy the kernel's vmalloc-region PGD entries from init_mm into
 * 'mm', retrying until the kernel-mapping sequence counter
 * (init_mm.context.kvm_seq) is observed unchanged across the copy.
 * This brings 'mm' up to date after unmap_area_sections() has torn
 * down section mappings and bumped the counter.
 *
 * NOTE(review): the loop is a seqlock-style retry - the read of the
 * counter before and after the memcpy must stay exactly where it is.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}
# ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	/*
	 * Round the size down to a whole number of 1MB sections.
	 * (The previous "size & ~SZ_1M" merely cleared bit 20 and did
	 * not align the size, so 'end' could be miscomputed - e.g.
	 * 1MB + 4K guard became 4K instead of 1MB.)
	 */
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
/*
 * Map [virt, virt + size) to the physical range starting at page frame
 * 'pfn' using 1MB section entries.  Each pgd slot covers two pmd
 * entries, i.e. two 1MB sections, which is why the loop writes pmd[0]
 * and pmd[1] and then steps by PGDIR_SIZE.
 *
 * Caller (__arm_ioremap_pfn) guarantees phys/size/virt are all
 * section-aligned.  Always returns 0.
 */
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		/* make the pair of entries visible to the table walker */
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
2006-07-01 22:58:20 +04:00
/*
 * Map [virt, virt + size) to the physical range starting at page frame
 * 'pfn' using 16MB supersection entries.  A supersection is represented
 * by 16 identical first-level descriptors (8 pgd slots x 2 pmds each),
 * hence the inner loop of 8.
 *
 * Bits of the physical address above 32 (pfn >> (32 - PAGE_SHIFT)) are
 * folded into bits [23:20] of the descriptor, which is how
 * supersections address >4GB physical space on a 32-bit CPU.
 *
 * Caller guarantees supersection alignment of phys/size/virt.
 * Always returns 0.
 */
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		/* physical address bits [35:32] -> descriptor bits [23:20] */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
2006-06-29 23:17:15 +04:00
# endif
2005-04-17 02:20:36 +04:00
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'mtype' selects the memory type (and hence the extra L_PTE_ flags)
 * used for this mapping.  See <asm/pgtable.h> for more information.
 */
2006-01-09 22:23:11 +03:00
/*
 * Map 'size' bytes starting at physical page frame 'pfn' (plus a
 * sub-page 'offset') into kernel virtual space with memory type 'mtype'.
 *
 * On !SMP kernels, suitably-aligned regions are mapped with 1MB
 * sections or (for pfns above 4GB physical, i.e. pfn >= 0x100000)
 * 16MB supersections; otherwise ordinary 4K pages are used.
 * Section mappings are tagged with VM_ARM_SECTION_MAPPING so that
 * __iounmap() can tear them down specially.
 *
 * Returns the virtual address of the mapping (offset included), or
 * NULL on failure (bad alignment, unknown mtype, no virtual space, or
 * page-table allocation failure).
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	/*
	 * (Super)sections need the I/O domain to be domain 0 and either
	 * an ARMv6+ CPU with extended page tables (CR_XP) or an XSc3;
	 * phys, size and virt must all share the required alignment.
	 */
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *)(offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
2006-01-09 22:23:11 +03:00
2005-04-17 02:20:36 +04:00
/*
 * Map the physical range [phys_addr, phys_addr + size) into kernel
 * virtual space with memory type 'mtype'.  Thin wrapper that validates
 * the request, splits the physical address into a page frame number
 * plus an in-page offset, and delegates to __arm_ioremap_pfn().
 *
 * Returns NULL for a zero-sized request or one that wraps around the
 * top of the address space.
 */
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	/*
	 * Don't allow wraparound or zero size
	 */
	if (size == 0 || phys_addr + size - 1 < phys_addr)
		return NULL;

	return __arm_ioremap_pfn(__phys_to_pfn(phys_addr),
				 phys_addr & ~PAGE_MASK, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
2005-04-17 02:20:36 +04:00
2008-09-05 17:08:44 +04:00
/*
 * Tear down a mapping created by __arm_ioremap_pfn()/__arm_ioremap().
 * 'io_addr' may carry the sub-page offset returned by ioremap; it is
 * masked back to the page boundary first.
 *
 * On !SMP kernels, (super)section mappings (VM_ARM_SECTION_MAPPING)
 * are unlinked from vmlist and unmapped by hand, because the generic
 * vmalloc code only understands page-level mappings.  Everything else
 * goes through vunmap().
 */
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);