// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/cpuhotplug.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>

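/*
 * Account a shadow stack against the NR_KERNEL_SCS_KB node vmstat counter;
 * @account is +1 when a stack is allocated and -1 when it is freed.
 */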
static void __scs_account(void *s, int account)
{
	struct page *scs_page = vmalloc_to_page(s);

	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));
}

/* Matches NR_CACHED_STACKS for VMAP_STACK */
#define NR_CACHED_SCS 2
static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);

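/*
 * __scs_alloc() first tries to hand out a stack from this per-CPU cache,
 * unpoisoning and zeroing it before reuse, and only falls back to a fresh
 * vmalloc mapping when the cache is empty.
 */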
static void *__scs_alloc(int node)
{
	int i;
	void *s;

	for (i = 0; i < NR_CACHED_SCS; i++) {
		s = this_cpu_xchg(scs_cache[i], NULL);
		if (s) {
			kasan_unpoison_vmalloc(s, SCS_SIZE);
			memset(s, 0, SCS_SIZE);
			return s;
		}
	}

	return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_SCS, PAGE_KERNEL, 0, node,
				    __builtin_return_address(0));
}

void *scs_alloc(int node)
{
	void *s;

	s = __scs_alloc(node);
	if (!s)
		return NULL;

	*__scs_magic(s) = SCS_END_MAGIC;
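
	/*
	 * The end-of-stack magic written above is what
	 * task_scs_end_corrupted() checks at task exit (see scs_release())
	 * to detect a corrupted or overflowed shadow stack.
	 */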

	/*
	 * Poison the allocation to catch unintentional accesses to
	 * the shadow stack when KASAN is enabled.
	 */
	kasan_poison_vmalloc(s, SCS_SIZE);

	__scs_account(s, 1);
	return s;
}

void scs_free(void *s)
{
	int i;

	__scs_account(s, -1);

	/*
	 * We cannot sleep as this can be called in interrupt context,
	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
	 * to free the stack.
	 */
	for (i = 0; i < NR_CACHED_SCS; i++)
		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
			return;

	vfree_atomic(s);
}

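/*
 * CPU hotplug teardown callback: free any shadow stacks still sitting in
 * a CPU's cache when it is taken offline.
 */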
static int scs_cleanup(unsigned int cpu)
{
	int i;
	void **cache = per_cpu_ptr(scs_cache, cpu);

	for (i = 0; i < NR_CACHED_SCS; i++) {
		vfree(cache[i]);
		cache[i] = NULL;
	}

	return 0;
}

void __init scs_init(void)
{
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
			  scs_cleanup);
}

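/*
 * Allocate a shadow stack for a new task and point both task_scs() (the
 * base) and task_scs_sp() (the current shadow stack pointer) at it; the
 * shadow call stack grows upwards from the base.
 */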
int scs_prepare(struct task_struct *tsk, int node)
{
	void *s = scs_alloc(node);

	if (!s)
		return -ENOMEM;

	task_scs(tsk) = task_scs_sp(tsk) = s;
	return 0;
}

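/*
 * With CONFIG_DEBUG_STACK_USAGE, estimate a task's shadow stack usage by
 * scanning from the base until the first unused (zero) entry, and report
 * a new system-wide high-water mark when one is set.
 */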
static void scs_check_usage(struct task_struct *tsk)
{
	static unsigned long highest;
	unsigned long *p, prev, curr = highest, used = 0;

	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
		return;

	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
		if (!READ_ONCE_NOCHECK(*p))
			break;
		used += sizeof(*p);
	}
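
	/*
	 * Update the high-water mark. cmpxchg_relaxed() fails if another
	 * exiting task updated @highest concurrently; in that case retry
	 * against the value it installed, and stop once our own usage no
	 * longer exceeds it.
	 */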

	while (used > curr) {
		prev = cmpxchg_relaxed(&highest, curr, used);
		if (prev == curr) {
			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
				tsk->comm, task_pid_nr(tsk), used);
			break;
		}
		curr = prev;
	}
}

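/*
 * Called when the task is freed: warn if the end-of-stack magic has been
 * overwritten, record usage statistics, and release the shadow stack via
 * scs_free().
 */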
void scs_release(struct task_struct *tsk)
{
	void *s = task_scs(tsk);

	if (!s)
		return;

	WARN(task_scs_end_corrupted(tsk),
	     "corrupted shadow stack detected when freeing task\n");
	scs_check_usage(tsk);
	scs_free(s);
}