/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;
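
/*
 * Each entry below gives, for one supported cache policy: the name matched
 * against the "cachepolicy=" boot argument, the control register bits that
 * must be cleared for it, and the section (pmd) and page (pte) attribute
 * bits it implies.
 */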
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
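
/*
 * Handle the "ecc=" early parameter: "on" sets PMD_PROTECTION in the
 * first-level descriptors built for kernel memory and the vectors
 * mappings, "off" leaves it clear.
 */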
static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
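
/*
 * "noalign" on the command line clears the A (alignment fault enable) bit
 * in both cached copies of the control register value and updates the
 * hardware register accordingly.
 */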
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
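
/*
 * pmd_off() returns the pmd entry covering 'virt' within the given pgd;
 * pmd_off_k() does the same within the kernel (init_mm) page tables.
 */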
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
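
/*
 * Free a level 1 page table allocated by get_pgd_slow(), together with the
 * pte table covering the first page (the low vectors) if one was allocated.
 */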
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	/*
	 * A supersection descriptor must be repeated in each of the 16
	 * first-level entries covering its 16MB range.
	 */
	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
		virt += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
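
/*
 * mem_types[] gives, for each mapping type, the page table entry bits
 * (prot_pte), the level 1 table descriptor bits (prot_l1), the section
 * descriptor bits (prot_sect) and the access domain to use.  These are
 * baseline values; build_mem_type_table() adjusts them for the CPU
 * architecture and cache policy actually in use.
 */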
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	},
	[MT_NONSHARED_DEVICE] = {
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	/*
	 * Xscale must not have PMD bit 4 set for section mappings.
	 */
	if (cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_BIT4;

	/*
	 * On ARMv5 and lower, excluding Xscale, bit 4 must be set for
	 * page tables.
	 */
	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent()) {
		if (cpu_is_xsc3()) {
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
		}
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;

	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}

	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}
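
/*
 * vectors_base() is where the vector page is mapped: 0xffff0000 when the
 * CPU has been configured for high vectors, otherwise address 0.
 */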
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
				"mapping for 0x%08llx at 0x%08lx\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}
		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
			& ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
				"0x%08llx at 0x%08lx invalid alignment\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}

	virt   = md->virtual;
	off   -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
		&& domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
				(virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt   += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}