2010-11-21 14:41:57 +03:00
# include <linux/kernel.h>
# include <asm/cputype.h>
2011-09-30 14:43:29 +04:00
# include <asm/idmap.h>
2010-11-21 14:41:57 +03:00
# include <asm/pgalloc.h>
# include <asm/pgtable.h>
2011-09-30 14:43:29 +04:00
# include <asm/sections.h>
2012-03-28 21:30:01 +04:00
# include <asm/system_info.h>
2011-09-30 14:43:29 +04:00
pgd_t * idmap_pgd ;
2010-11-21 14:41:57 +03:00
2011-11-22 21:30:32 +04:00
# ifdef CONFIG_ARM_LPAE
/*
 * Build the pmd-level identity mapping for [addr, end) under 'pud',
 * writing section (block) descriptors that carry 'prot'.
 *
 * If the pud slot is empty, bad, or still refers to the swapper page
 * table (L_PGD_SWAPPER set), a fresh pmd table is allocated and
 * installed first; on allocation failure the range is silently left
 * unmapped apart from the warning.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		/* Hook the new table into the pud, then step to addr's entry. */
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		/* Identity section: descriptor output address == addr. */
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
# else /* !CONFIG_ARM_LPAE */
2010-11-21 19:27:49 +03:00
/*
 * Write an identity section mapping for 'addr' into the pmd, with the
 * given 'prot' bits.  The classic (non-LPAE) ARM page tables pack two
 * hardware section entries into each Linux pmd, hence both slots are
 * filled before the entry is flushed.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *entry = pmd_offset(pud, addr);
	unsigned long section = (addr & PMD_MASK) | prot;

	entry[0] = __pmd(section);
	entry[1] = __pmd(section + SECTION_SIZE);
	flush_pmd_entry(entry);
}
2011-11-22 21:30:32 +04:00
# endif /* CONFIG_ARM_LPAE */
2010-11-21 14:48:16 +03:00
2010-11-21 19:27:49 +03:00
/*
 * Walk the pud entries covering [addr, end) and build the pmd-level
 * identity mapping for each sub-range.
 */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);

	for (;;) {
		unsigned long boundary = pud_addr_end(addr, end);

		idmap_add_pmd(pud, addr, boundary, prot);
		addr = boundary;
		if (addr == end)
			break;
		pud++;
	}
}
2011-11-23 16:26:25 +04:00
/*
 * Create a 1:1 (VA == PA) section mapping for [addr, end) in the page
 * directory 'pgd'.  Sections are writable and marked accessed; on
 * pre-ARMv6 cores (other than XScale) the descriptor additionally
 * needs PMD_BIT4 set.
 */
static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;

	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		unsigned long boundary = pgd_addr_end(addr, end);

		idmap_add_pud(pgd, addr, boundary, prot);
		pgd++;
		addr = boundary;
	} while (addr != end);
}
2011-09-30 14:43:29 +04:00
/* Linker-provided bounds of the .idmap.text section. */
extern char __idmap_text_start[], __idmap_text_end[];

/*
 * Allocate a dedicated page directory and install an identity mapping
 * covering the .idmap.text section, so the code placed there can run
 * with VA == PA (e.g. while the MMU is being turned off).
 *
 * Returns 0 on success, -ENOMEM if the pgd cannot be allocated.
 */
static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start, idmap_end;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	/* Add an identity mapping for the physical address of the section. */
	idmap_start = virt_to_phys((void *)__idmap_text_start);
	idmap_end = virt_to_phys((void *)__idmap_text_end);

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	/* Flush L1 for the hardware to see this page table content */
	flush_cache_louis();

	return 0;
}
2011-11-23 16:26:25 +04:00
early_initcall ( init_static_idmap ) ;
2011-09-30 14:43:29 +04:00
2010-11-21 14:41:57 +03:00
/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions.  This will then ensure that we have predictable
 * results when turning off the mmu.
 */
void setup_mm_for_reboot(void)
{
	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);

#ifdef CONFIG_CPU_HAS_ASID
	/*
	 * We don't have a clean ASID for the identity mapping, which
	 * may clash with virtual addresses of the previous page tables
	 * and therefore potentially in the TLB.
	 */
	local_flush_tlb_all();
#endif
}