2017-11-17 14:29:13 +01:00
// SPDX-License-Identifier: GPL-2.0
# include <linux/kasan.h>
# include <linux/sched/task.h>
# include <linux/memblock.h>
2020-06-08 21:32:38 -07:00
# include <linux/pgtable.h>
2020-06-08 21:32:42 -07:00
# include <asm/pgalloc.h>
2017-11-17 14:29:13 +01:00
# include <asm/kasan.h>
2018-09-13 10:59:25 +02:00
# include <asm/mem_detect.h>
2017-11-17 14:29:13 +01:00
# include <asm/processor.h>
# include <asm/sclp.h>
2017-11-17 18:44:28 +01:00
# include <asm/facility.h>
2017-11-17 14:29:13 +01:00
# include <asm/sections.h>
# include <asm/setup.h>
2020-09-11 11:44:47 +02:00
# include <asm/uv.h>
2017-11-17 14:29:13 +01:00
2017-11-17 18:44:28 +01:00
/* Segment pool (used when EDAT is available): grows downward from segment_pos
 * towards segment_low in _SEGMENT_SIZE steps. */
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
/* Early page allocator: grows downward from pgalloc_pos towards pgalloc_low. */
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
/* Watermark recorded before the identity mapping is populated; pages between
 * pgalloc_pos and pgalloc_freeable are handed back in kasan_free_early_identity(). */
static unsigned long pgalloc_freeable __initdata;
/* Facility flags filled in by kasan_early_detect_facilities(). */
static bool has_edat __initdata;
static bool has_nx __initdata;

/* Shadow address for a given (virtual) address. */
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

/* Temporary top-level page table used until the kernel switches to its
 * final page tables (see kasan_copy_shadow_mapping()). */
static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
/*
 * Print a boot-failure banner plus @reason via the early SCLP console
 * and stop the CPU; there is no way to recover this early in boot.
 */
static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}
2017-11-17 18:44:28 +01:00
/*
 * Carve one _SEGMENT_SIZE chunk off the top of the segment pool.
 * Panics when the pool would be overrun; the returned memory is
 * not zeroed.
 */
static void *__init kasan_early_alloc_segment(void)
{
	unsigned long new_pos = segment_pos - _SEGMENT_SIZE;

	segment_pos = new_pos;
	if (new_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");
	return (void *)new_pos;
}
2017-11-17 14:29:13 +01:00
/*
 * Allocate (PAGE_SIZE << order) bytes by moving the early allocator
 * watermark down. Panics when the watermark would cross pgalloc_low;
 * the returned memory is not zeroed.
 */
static void *__init kasan_early_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	pgalloc_pos -= size;
	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");
	return (void *)pgalloc_pos;
}
/*
 * Allocate a region/segment (crst) table and initialize all entries
 * to @val. Returns NULL only if the underlying allocator did
 * (it panics instead, so in practice the result is always valid).
 */
static void *__init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);

	if (table)
		crst_table_init(table, val);
	return table;
}
/*
 * Allocate one page table. A page holds exactly two page tables
 * (enforced by the BUILD_BUG_ON below), so a full page is allocated
 * every other call and the second half is kept for the next call.
 * All entries are initialized to _PAGE_INVALID.
 */
static pte_t *__init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (pte_leftover) {
		/* Use the second half saved from the previous allocation. */
		pte = pte_leftover;
		pte_leftover = NULL;
	} else {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
/*
 * How kasan_early_pgtable_populate() fills the range:
 * POPULATE_ONE2ONE     - identity mapping (virtual == physical)
 * POPULATE_MAP         - back each page with freshly allocated, zeroed memory
 * POPULATE_ZERO_SHADOW - map everything to the shared read-only zero shadow
 * POPULATE_SHALLOW     - populate the pgd level only, skip lower levels
 */
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};
2020-10-05 09:13:15 +02:00
/*
 * Walk [address, end) and populate all page-table levels according to @mode,
 * allocating intermediate tables from the early allocators as needed.
 * Uses the shared kasan_early_shadow_* tables for large zero-shadow ranges
 * and segment (pmd-level) mappings when EDAT is available.
 */
static void __init kasan_early_pgtable_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	/* Zero shadow is mapped read-only; drop NX when not supported. */
	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	/* The identity mapping must stay executable (kernel text lives there). */
	if (!has_nx || mode == POPULATE_ONE2ONE) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			/* Whole pgd worth of zero shadow: share one p4d table. */
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		/* Shallow mode stops here: only the pgd level is populated. */
		if (mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					/* Share the zero page table for the whole pmd. */
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat && address) {
					/*
					 * Large (segment) mapping. "&& address"
					 * keeps the very first segment of the
					 * 1:1 mapping on 4 KB pages (it falls
					 * through to the pte loop below).
					 */
					void *page;

					if (mode == POPULATE_ONE2ONE) {
						page = (void *)address;
					} else {
						page = kasan_early_alloc_segment();
						memset(page, 0, _SEGMENT_SIZE);
					}
					pmd_val(*pm_dir) = __pa(page) | sgt_prot;
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			/* Already mapped by a large entry: nothing to do. */
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}
/*
 * Install @pgd as the address-space-control element (ASCE) of the given
 * @asce_type for both kernel and user space, and load it into the
 * primary (CR1), secondary (CR7) and home (CR13) space control registers.
 */
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits = asce_type | _ASCE_TABLE_LENGTH;

	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}
/*
 * Turn on dynamic address translation by setting the DAT bit in the
 * current PSW mask and selecting the home address space.
 */
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}
2017-11-17 18:44:28 +01:00
/*
 * Probe the CPU facilities KASAN setup cares about and enable them
 * via control register 0 bits.
 */
static void __init kasan_early_detect_facilities(void)
{
	/* Facility 8: enhanced DAT - allows large (segment) mappings. */
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	/* Facility 130: no-execute protection, unless disabled on the command line. */
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}
2017-11-17 14:29:13 +01:00
/*
 * Build the early page tables (identity mapping plus KASAN shadow),
 * switch to them, enable DAT and finally enable KASAN itself.
 * Runs before memblock is usable for allocation, hence the private
 * downward-growing allocators above.
 */
void __init kasan_early_init(void)
{
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long memsize;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	/* Template entries pointing at the shared zero-shadow tables. */
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/*
	 * Kasan currently supports standby memory but only if it follows
	 * online memory (default allocation), i.e. no memory holes.
	 * - memsize represents end of online memory
	 * - ident_map_size represents online + standby and memory limits
	 *   accounted.
	 * Kasan maps "memsize" right away.
	 * [0, memsize]			- as identity mapping
	 * [__sha(0), __sha(memsize)]	- shadow memory for identity mapping
	 * The rest [memsize, ident_map_size] if memsize < ident_map_size
	 * could be mapped/unmapped dynamically later during memory hotplug.
	 */
	memsize = min(memsize, ident_map_size);

	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
	crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
				p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
				pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
				pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/* Shadow needs 1/8 of tracked memory; allocate above kernel/initrd. */
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		/* Reserve a segment-aligned pool for large shadow mappings. */
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |                |
	 * |                 |/    |    kasan       |
	 * +- shadow start --+     |    zero        |
	 * | 1/8 addr space  |     |    page        |
	 * +- shadow end    -+     |    mapping     |
	 * | ... gap ...     |\    |  (untracked)   |
	 * +- vmalloc area  -+ \   |                |
	 * | vmalloc_size    |  \  |                |
	 * +- modules vaddr -+   \ +----------------+
	 * | 2Gb             |    \|    unmapped    | allocated per module
	 * +-----------------+     +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |    kasan       |
	 * |                 |/    |    zero        |
	 * +- shadow start --+     |    page        |
	 * | 1/8 addr space  |     |    mapping     |
	 * +- shadow end    -+     |  (untracked)   |
	 * | ... gap ...     |\    |                |
	 * +- vmalloc area  -+ \   +- vmalloc area -+
	 * | vmalloc_size    |  \  |shallow populate|
	 * +- modules vaddr -+   \ +- modules area -+
	 * | 2Gb             |    \|shallow populate|
	 * +-----------------+     +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
					     POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_pgtable_populate(__sha(ident_map_size),
				     IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
						   __sha(VMALLOC_START) :
						   __sha(MODULES_VADDR),
				     POPULATE_ZERO_SHADOW);
	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_pgtable_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, _ASCE_TYPE_REGION2);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	/* Keep the shadow memory (everything above pgalloc_pos) reserved. */
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}
2020-10-05 10:28:48 +02:00
/*
 * Copy the KASAN shadow region's p4d entries from early_pg_dir into the
 * final kernel page tables so the shadow survives the switch away from
 * the early page tables.
 */
void __init kasan_copy_shadow_mapping(void)
{
	/*
	 * At this point we are still running on early pages setup early_pg_dir,
	 * while swapper_pg_dir has just been initialized with identity mapping.
	 * Carry over shadow memory region from early_pg_dir to swapper_pg_dir.
	 */
	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	memcpy(p4_dir_dst, p4_dir_src,
	       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
}
2017-11-20 12:56:10 +01:00
/*
 * Release the pages that were used for the early identity-mapping page
 * tables (everything allocated after the pgalloc_freeable watermark) back
 * to memblock once they are no longer needed.
 */
void __init kasan_free_early_identity(void)
{
	memblock_phys_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}