/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
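
/*
 * Map the whole KASAN shadow region to the single zeroed
 * kasan_early_shadow_page, so that any shadow access made before
 * kasan_init() runs hits valid, unpoisoned memory.
 */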
void __init kasan_early_init(void)
{
	unsigned long vaddr = KASAN_SHADOW_START;
	pmd_t *pmd = pmd_off_k(vaddr);
	int i;
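
	/* Make every early shadow PTE map the shared all-zero shadow page. */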
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL));
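
	/* Point each PMD entry of the shadow region at that PTE page. */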
	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
	}
}
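
/*
 * Back the shadow map for [start, end) with freshly allocated pages,
 * replacing the early mapping of kasan_early_shadow_page.
 */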
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pmd_t *pmd = pmd_off_k(vaddr);
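	/* One contiguous array of PTEs: one PTE per shadow page mapped. */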
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);
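
	/* Allocate a physical page for each shadow page and fill its PTE. */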
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}
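
	/* Install the new PTE pages into the PMD entries covering the range. */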
	for (i = 0; i < n_pmds; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));
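
	/* Drop stale TLB entries, then clear the new shadow: nothing poisoned yet. */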
	local_flush_tlb_all();
	memset(start, 0, end - start);
}

void __init kasan_init(void)
{
	int i;
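
	/*
	 * Sanity-check the shadow layout: shadow(addr) must equal
	 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET for the
	 * start of the shadowed region.
	 */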
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);

	/*
	 * Replace shadow map pages that cover addresses from VMALLOC area
	 * start to the end of KSEG with clean writable pages.
	 */
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/*
	 * Write protect kasan_early_shadow_page and zero-initialize it again.
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	local_flush_tlb_all();
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point kasan is fully initialized. Enable error messages. */
	current->kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}