// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/io.h>

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init_nodes(max_zone_pfns);
}
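
/* Make sure the page exported as empty_zero_page really is all zeroes. */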
void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}
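
/*
 * Hand all memblock-managed memory over to the buddy allocator and
 * print the usual memory banner.
 */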
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
}

#ifdef CONFIG_BLK_DEV_INITRD
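/*
 * Validate the initrd handed over by the bootloader and reserve its
 * pages in memblock; on any inconsistency the initrd is disabled.
 */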
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_info("initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */
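
/*
 * Memory beyond (phys_addr_t)-PAGE_OFFSET, the largest span the linear
 * mapping can cover, is trimmed from the region holding the kernel;
 * the kernel image and initrd are then reserved before the FDT
 * reservations are scanned.
 */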
void __init setup_bootmem(void)
{
	struct memblock_region *reg;
	phys_addr_t mem_size = 0;
	phys_addr_t vmlinux_end = __pa(&_end);
	phys_addr_t vmlinux_start = __pa(&_start);

	/* Find the memory region containing the kernel */
	for_each_memblock(memory, reg) {
		phys_addr_t end = reg->base + reg->size;

		if (reg->base <= vmlinux_end && vmlinux_end <= end) {
			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);

			/*
			 * Remove memblock from the end of usable area to the
			 * end of region
			 */
			if (reg->base + mem_size < end)
				memblock_remove(reg->base + mem_size,
						end - reg->base - mem_size);
		}
	}
	BUG_ON(mem_size == 0);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	set_max_mapnr(PFN_DOWN(mem_size));
	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}
}

unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

void *dtb_early_va;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
static bool mmu_enabled;

#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
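
/*
 * Map or unmap one fixmap slot; clearing a slot also flushes the stale
 * TLB entry for its virtual address.
 */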
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	} else {
		pte_clear(&init_mm, addr, ptep);
		local_flush_tlb_page(addr);
	}
}
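
/*
 * While the MMU is off, page tables are accessed via their physical
 * addresses; once it is on, a table is reached by temporarily mapping
 * it at the FIX_PTE fixmap slot.
 */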
static pte_t *__init get_pte_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PTE);
		return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
	} else {
		return (pte_t *)((uintptr_t)pa);
	}
}

static phys_addr_t __init alloc_pte(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG_ON(!mmu_enabled);

	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_index = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_index]))
		ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED
pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PMD);
		return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
	} else {
		return (pmd_t *)((uintptr_t)pa);
	}
}
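
/*
 * Before the MMU is on, PMD tables come from the statically sized
 * early_pmd pool, indexed by the PGD slot the VA falls into; afterwards
 * they are allocated from memblock.
 */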
static phys_addr_t __init alloc_pmd(uintptr_t va)
{
	uintptr_t pmd_num;

	if (mmu_enabled)
		return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}
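
/*
 * Install a PMD-sized leaf mapping directly, or descend into the PTE
 * table covering @va, allocating and zeroing it first if it does not
 * exist yet.
 */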
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_index = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_index]))
			pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_index])) {
		pte_phys = alloc_pte(va);
		pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
		ptep = get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}
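
/*
 * These aliases let create_pgd_mapping() hand work down to the PMD
 * level when it exists, or straight to PTEs when the PMD level is
 * folded.
 */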
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE		PMD_SIZE
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	alloc_pte(__va)
#define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE		PGDIR_SIZE
#define fixmap_pgd_next		fixmap_pte
#endif

static void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_index = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_index]) == 0)
			pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_index]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	uintptr_t map_size = PAGE_SIZE;

	/* Upgrade to PMD/PGDIR mappings whenever possible */
	if (!(base & (PTE_PARENT_SIZE - 1)) &&
	    !(size & (PTE_PARENT_SIZE - 1)))
		map_size = PTE_PARENT_SIZE;

	return map_size;
}

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * The following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */
#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
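
/*
 * A sketch of the per-object flags mm/Makefile is expected to carry for
 * this (the exact lines are an assumption and may differ):
 *
 *	CFLAGS_init.o := -mcmodel=medany
 *	ifdef CONFIG_FTRACE
 *	CFLAGS_REMOVE_init.o = -pg
 *	endif
 */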

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create fixed mapping for early FDT parsing */
	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
	for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
		create_pte_mapping(fixmap_pte, va,
				   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
				   PAGE_SIZE, PAGE_KERNEL);

	/* Save pointer to DTB for early FDT parsing */
	dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
}
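
/*
 * Runs with the MMU on and early_pg_dir live: build the final
 * swapper_pg_dir, map every usable memblock bank, then point satp at
 * the new table and flush the TLB.
 */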
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	struct memblock_region *reg;

	/* Set mmu_enabled flag */
	mmu_enabled = true;

	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();
}
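
/* Finalize the page tables, zero the zero page and size the zones. */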
void __init paging_init(void)
{
	setup_vm_final();
	setup_zero_page();
	zone_sizes_init();
}