// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

unsigned long __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm		(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir	vmlinux.swapper_pg_dir_off
#define invalid_pg_dir	vmlinux.invalid_pg_dir_off
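
/*
 * Mode in which pgtable_populate() maps a virtual address range: leave the
 * range unmapped (page tables only), identity map it, map it to the absolute
 * lowcore, or apply one of the KASAN shadow population strategies below.
 */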
enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;
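
/* Translate [start, end) to the corresponding shadow range and populate it. */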
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * |1:1 ident mapping|    /|1/8 of ident map|
	 * |                 |   / |                |
	 * +-end of ident map+  /  +----------------+
	 * | ... gap ...     | /   | kasan          |
	 * |                 |/    | zero page      |
	 * +- vmalloc area  -+     | mapping        |
	 * | vmalloc_size    |     | (untracked)    |
	 * +- modules vaddr -+     +----------------+
	 * | 2Gb             |    /|    unmapped    | allocated per module
	 * +- shadow start  -+   / +----------------+
	 * | 1/8 addr space  |  /  | zero pg mapping| (untracked)
	 * +- shadow end ----+ /   +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * |1:1 ident mapping|    /|1/8 of ident map|
	 * |                 |   / |                |
	 * +-end of ident map+  /  +----------------+
	 * | ... gap ...     | /   | kasan zero page| (untracked)
	 * |                 |/    | mapping        |
	 * +- vmalloc area  -+     +----------------+
	 * | vmalloc_size    |    /|shallow populate|
	 * +- modules vaddr -+   / +----------------+
	 * | 2Gb             |  /  |shallow populate|
	 * +- shadow start  -+ /   +----------------+
	 * | 1/8 addr space  |/    | zero pg mapping| (untracked)
	 * +- shadow end ----+     +- shadow end ---+
	 */
	for_each_physmem_usable_range(i, &start, &end)
		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}
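
/*
 * The kasan_*_populate_zero_shadow() helpers short-circuit population in
 * POPULATE_KASAN_ZERO_SHADOW mode: whenever an entire, suitably aligned
 * region can be covered at the current table level, it is pointed at the
 * shared early shadow tables/page instead of allocating new ones.
 */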
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}

#else

static inline void kasan_populate_shadow(void) {}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte() in the absence of the init_mm symbol.
 * Skip the pmd NULL check though.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}
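
/*
 * Allocate a CRST (region/segment) table of PAGE_SIZE << CRST_ALLOC_ORDER
 * bytes from the RR_VMEM physmem reserve and preset all entries to val.
 */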
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	return table;
}
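
/*
 * Allocate page tables in pairs out of a single page: a 2KB page table only
 * occupies half a page, so remember the unused half for the next call.
 */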
static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * Handling pte_leftover this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off.
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
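
/*
 * Return the physical address that addr should be mapped to in the given
 * populate mode. POPULATE_NONE yields -1: such ranges only need page tables,
 * and valid mappings are established later (e.g. for the memcpy_real area).
 */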
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}
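
/*
 * Large mappings at the pud (2GB) and pmd (1MB) level require the EDAT2 and
 * EDAT1 facilities respectively, plus suitable alignment and a large enough
 * remainder of the range.
 */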
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat2 &&
	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat1 &&
	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}
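
/*
 * Fill all empty ptes in [addr, end) with 4KB mappings and count the pages
 * mapped this way, so that the PG_DIRECT_MAP_4K statistics stay accurate
 * for the identity mapping.
 */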
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL);
			if (!machine.has_nx)
				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}
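
/*
 * Populate the segment (pmd) level: use 1MB large pages where EDAT1 allows
 * it, otherwise allocate a page table and descend to the pte level.
 */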
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
				if (!machine.has_nx)
					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}
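
/*
 * Populate the region-third (pud) level: use 2GB large pages where EDAT2
 * allows it, otherwise allocate a segment table and descend.
 */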
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL);
				if (!machine.has_nx)
					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}
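
/*
 * Populate the region-second (p4d) level; no large mappings exist at this
 * level, so either short-circuit to the kasan zero shadow or allocate a
 * region-third table and descend.
 */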
static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}
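
/*
 * Walk [addr, end) from the top (pgd) level downwards and create all missing
 * page table entries according to mode. POPULATE_KASAN_SHALLOW stops after
 * the pgd level, which is what makes the population "shallow".
 */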
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}
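
/*
 * Set up the boot-time kernel page tables: identity map all usable physical
 * memory, map the absolute lowcore and the memcpy_real area, populate the
 * kasan shadow, and load the resulting ASCEs into control registers 1, 7
 * and 13.
 */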
void setup_vmem(unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	int i;

	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);

	/*
	 * To allow prefixing the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0 first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	kasan_populate_shadow();

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);

	init_mm.context.asce = S390_lowcore.kernel_asce;
}