// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/vmstat.h>

#include <asm/scs.h>
static struct kmem_cache * scs_cache ;
static void * scs_alloc ( int node )
{
void * s ;
s = kmem_cache_alloc_node ( scs_cache , GFP_SCS , node ) ;
if ( s ) {
* __scs_magic ( s ) = SCS_END_MAGIC ;
/*
* Poison the allocation to catch unintentional accesses to
* the shadow stack when KASAN is enabled .
*/
kasan_poison_object_data ( scs_cache , s ) ;
}
return s ;
}
/* Return a shadow call stack to the cache. */
static void scs_free(void *s)
{
	/* Lift the KASAN poison applied at allocation before freeing. */
	kasan_unpoison_object_data(scs_cache, s);
	kmem_cache_free(scs_cache, s);
}
/* Boot-time setup: create the slab cache backing all shadow call stacks. */
void __init scs_init(void)
{
	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
}
/* Resolve the page backing tsk's shadow call stack base address. */
static struct page *__scs_page(struct task_struct *tsk)
{
	return virt_to_page(task_scs(tsk));
}
/*
 * Adjust the NR_KERNEL_SCS_KB vmstat counter for tsk's shadow stack.
 * account is +1 when a stack is attached and -1 when it is released;
 * the counter is kept in KiB, hence the SCS_SIZE / 1024 scaling.
 */
static void scs_account(struct task_struct *tsk, int account)
{
	struct zone *zone = page_zone(__scs_page(tsk));

	mod_zone_page_state(zone, NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / 1024));
}
/*
 * Attach a freshly allocated shadow call stack to tsk, allocating from
 * the given NUMA node. Returns 0 on success or -ENOMEM if the
 * allocation failed.
 */
int scs_prepare(struct task_struct *tsk, int node)
{
	void *stack = scs_alloc(node);

	if (!stack)
		return -ENOMEM;

	/* Fresh stack: base pointer set, offset starts at zero. */
	task_scs(tsk) = stack;
	task_scs_offset(tsk) = 0;
	scs_account(tsk, 1);

	return 0;
}
/*
 * Detach and free tsk's shadow call stack, if it has one. Warns if the
 * end-of-stack magic was clobbered, which indicates an overflow or
 * corruption during the task's lifetime.
 */
void scs_release(struct task_struct *tsk)
{
	void *stack = task_scs(tsk);

	if (!stack)
		return;

	WARN(scs_corrupted(tsk),
	     "corrupted shadow stack detected when freeing task\n");
	scs_account(tsk, -1);
	scs_free(stack);
}