/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"
/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.mair		= 0x44,		/* inner, outer non-cacheable */
		.tcr		= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy		= "writethrough",
		.mair		= 0xaa,		/* inner, outer write-through, read-allocate */
		.tcr		= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy		= "writeback",
		.mair		= 0xee,		/* inner, outer write-back, read-allocate */
		.tcr		= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};
/*
 * These are useful for identifying cache coherency problems by allowing the
 * cache or the cache and writebuffer to be turned off. It changes the Normal
 * memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, %2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);
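/*
 * Choose the page protection for a userspace mapping of physical memory
 * (e.g. /dev/mem): non-RAM addresses get non-cacheable device attributes,
 * RAM mapped with O_SYNC gets write-combine, and anything else keeps the
 * protection passed in by the caller.
 */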
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
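/*
 * Allocate a zeroed, naturally aligned block from memblock for early page
 * table pages.
 */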
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}
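/*
 * Allocate a pte table for the pmd if none is present, then fill in the
 * pte entries covering [addr, end) with the given pfn and protection.
 */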
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
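/*
 * Fill in the pmd entries for [addr, end), using section mappings where the
 * addresses allow it and falling back to pte tables otherwise. map_io
 * selects device rather than normal memory attributes.
 */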
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  int map_io)
{
	pmd_t *pmd;
	unsigned long next;
	pmdval_t prot_sect;
	pgprot_t prot_pte;

	if (map_io) {
		prot_sect = PROT_SECT_DEVICE_nGnRE;
		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
	} else {
		prot_sect = PROT_SECT_NORMAL_EXEC;
		prot_pte = PAGE_KERNEL_EXEC;
	}

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys | prot_sect));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot_pte);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
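/*
 * Fill in the pud entries for [addr, end). With a 4K granule, a suitably
 * aligned range of normal memory is mapped with a 1GB block; otherwise the
 * range is handed down to alloc_init_pmd().
 */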
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  int map_io)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(&init_mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (!map_io && (PAGE_SHIFT == 12) &&
		    ((addr | next | phys) & ~PUD_MASK) == 0) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
				memblock_free(table, PAGE_SIZE);
				flush_tlb_all();
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, map_io);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
/*
 * Create the page directory entries and any necessary page tables for the
 * mapping of the given physical/virtual address range.
 */
static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
				    unsigned long virt, phys_addr_t size,
				    int map_io)
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, map_io);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
}
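/*
 * Create an identity (virtual == physical) mapping for the given range in
 * idmap_pg_dir.
 */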
void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
{
	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
		return;
	}
	__create_mapping(&idmap_pg_dir[pgd_index(addr)],
			 addr, addr, size, map_io);
}
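/*
 * Map all memblock memory regions into the kernel linear mapping.
 */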
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}
/*
 * Enable the identity mapping to allow the MMU disabling.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}
/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
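/* With 64K pages the vmemmap is mapped with base pages. */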
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
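/*
 * With 4K pages the vmemmap is mapped with PMD-sized blocks allocated via
 * vmemmap_alloc_block_buf().
 */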
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
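/* Nothing to do: teardown of the vmemmap is not supported here. */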
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */