/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
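
/*
 * Early shadow setup: until kasan_init() runs, every page of the shadow
 * region is backed by the single kasan_zero_page, installed by the
 * kasan_early_p{te,md,ud}_populate() helpers below. A shadow byte of 0
 * means "all 8 bytes addressable", so instrumented code can run safely
 * before the real shadow memory is allocated.
 */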

static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);

	pte = pte_offset_kernel(pmd, addr);
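
	/*
	 * The comma-operator condition below both advances the cursor and
	 * stops the walk early once it reaches an entry that is already
	 * populated, presumably by an earlier pass over this range.
	 */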
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		pud_populate(&init_mm, pud, kasan_zero_pmd);

	pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd, kasan_zero_pud);

	pud = pud_offset(pgd, addr);

	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
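
/*
 * kasan_early_init() runs very early in boot (called from assembly, hence
 * asmlinkage), before start_kernel(). KASAN maps each 8 bytes of address
 * space to one shadow byte:
 *
 *	shadow = (addr >> 3) + KASAN_SHADOW_OFFSET
 *
 * so the first BUILD_BUG_ON checks that the shadow of the top of the
 * 64-bit address space (2^64 >> 3 == 2^61 bytes above the offset) lands
 * exactly at KASAN_SHADOW_END.
 */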
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}
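
/*
 * Point TTBR1_EL1 (the translation table base register for the kernel
 * half of the address space) at a new pgd; the isb ensures the write is
 * visible before any subsequent translation uses it.
 */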
static void __init cpu_set_ttbr1(unsigned long ttbr1)
{
	asm(
	"	msr	ttbr1_el1, %0\n"
	"	isb"
	:
	: "r" (ttbr1));
}

void __init kasan_init(void)
{
	struct memblock_region *reg;
	int i;

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we must unmap the early shadow (the clear_pgds() call below),
	 * but instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	cpu_set_ttbr1(__pa(tmp_pg_dir));
	flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)MODULES_VADDR));
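
	/*
	 * The shadow below kasan_mem_to_shadow(MODULES_VADDR) was just
	 * backed by the shared zero page above; the loop below instead
	 * allocates real, writable shadow memory for each region of
	 * physical memory.
	 */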
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes
		 * in advance to slightly speed up the fast path. In some rare
		 * cases we could cross the boundary of mapped shadow, so we
		 * just map some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
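
	/*
	 * Shadow writes may have dirtied the zero page while it was mapped
	 * writable during early boot; re-zero it so everything it still
	 * covers reads as fully accessible, then switch back to the real
	 * swapper_pg_dir.
	 */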
	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_set_ttbr1(__pa(swapper_pg_dir));
	flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}