// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains generic KASAN specific error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */
# include <linux/bitops.h>
# include <linux/ftrace.h>
# include <linux/init.h>
# include <linux/kernel.h>
# include <linux/mm.h>
# include <linux/printk.h>
# include <linux/sched.h>
2020-12-22 23:00:49 +03:00
# include <linux/sched/task_stack.h>
2018-12-28 11:30:38 +03:00
# include <linux/slab.h>
# include <linux/stackdepot.h>
# include <linux/stacktrace.h>
# include <linux/string.h>
# include <linux/types.h>
# include <linux/kasan.h>
# include <linux/module.h>
# include <asm/sections.h>
# include "kasan.h"
# include "../slab.h"
2021-02-24 23:05:05 +03:00
void * kasan_find_first_bad_addr ( void * addr , size_t size )
2018-12-28 11:30:38 +03:00
{
2018-12-28 11:30:42 +03:00
void * p = addr ;
2018-12-28 11:30:38 +03:00
2018-12-28 11:30:42 +03:00
while ( p < addr + size & & ! ( * ( u8 * ) kasan_mem_to_shadow ( p ) ) )
2020-12-22 23:00:24 +03:00
p + = KASAN_GRANULE_SIZE ;
2018-12-28 11:30:42 +03:00
return p ;
2018-12-28 11:30:38 +03:00
}
static const char * get_shadow_bug_type ( struct kasan_access_info * info )
{
const char * bug_type = " unknown-crash " ;
u8 * shadow_addr ;
shadow_addr = ( u8 * ) kasan_mem_to_shadow ( info - > first_bad_addr ) ;
/*
2020-12-22 23:00:24 +03:00
* If shadow byte value is in [ 0 , KASAN_GRANULE_SIZE ) we can look
2018-12-28 11:30:38 +03:00
* at the next shadow byte to determine the type of the bad access .
*/
2020-12-22 23:00:24 +03:00
if ( * shadow_addr > 0 & & * shadow_addr < = KASAN_GRANULE_SIZE - 1 )
2018-12-28 11:30:38 +03:00
shadow_addr + + ;
switch ( * shadow_addr ) {
2020-12-22 23:00:24 +03:00
case 0 . . . KASAN_GRANULE_SIZE - 1 :
2018-12-28 11:30:38 +03:00
/*
* In theory it ' s still possible to see these shadow values
* due to a data race in the kernel code .
*/
bug_type = " out-of-bounds " ;
break ;
case KASAN_PAGE_REDZONE :
case KASAN_KMALLOC_REDZONE :
bug_type = " slab-out-of-bounds " ;
break ;
case KASAN_GLOBAL_REDZONE :
bug_type = " global-out-of-bounds " ;
break ;
case KASAN_STACK_LEFT :
case KASAN_STACK_MID :
case KASAN_STACK_RIGHT :
case KASAN_STACK_PARTIAL :
bug_type = " stack-out-of-bounds " ;
break ;
case KASAN_FREE_PAGE :
case KASAN_KMALLOC_FREE :
2020-08-07 09:24:39 +03:00
case KASAN_KMALLOC_FREETRACK :
2018-12-28 11:30:38 +03:00
bug_type = " use-after-free " ;
break ;
case KASAN_ALLOCA_LEFT :
case KASAN_ALLOCA_RIGHT :
bug_type = " alloca-out-of-bounds " ;
break ;
kasan: support backing vmalloc space with real shadow memory
Patch series "kasan: support backing vmalloc space with real shadow
memory", v11.
Currently, vmalloc space is backed by the early shadow page. This means
that kasan is incompatible with VMAP_STACK.
This series provides a mechanism to back vmalloc space with real,
dynamically allocated memory. I have only wired up x86, because that's
the only currently supported arch I can work with easily, but it's very
easy to wire up other architectures, and it appears that there is some
work-in-progress code to do this on arm64 and s390.
This has been discussed before in the context of VMAP_STACK:
- https://bugzilla.kernel.org/show_bug.cgi?id=202009
- https://lkml.org/lkml/2018/7/22/198
- https://lkml.org/lkml/2019/7/19/822
In terms of implementation details:
Most mappings in vmalloc space are small, requiring less than a full
page of shadow space. Allocating a full shadow page per mapping would
therefore be wasteful. Furthermore, to ensure that different mappings
use different shadow pages, mappings would have to be aligned to
KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE.
Instead, share backing space across multiple mappings. Allocate a
backing page when a mapping in vmalloc space uses a particular page of
the shadow region. This page can be shared by other vmalloc mappings
later on.
We hook in to the vmap infrastructure to lazily clean up unused shadow
memory.
Testing with test_vmalloc.sh on an x86 VM with 2 vCPUs shows that:
- Turning on KASAN, inline instrumentation, without vmalloc, introuduces
a 4.1x-4.2x slowdown in vmalloc operations.
- Turning this on introduces the following slowdowns over KASAN:
* ~1.76x slower single-threaded (test_vmalloc.sh performance)
* ~2.18x slower when both cpus are performing operations
simultaneously (test_vmalloc.sh sequential_test_order=1)
This is unfortunate but given that this is a debug feature only, not the
end of the world. The benchmarks are also a stress-test for the vmalloc
subsystem: they're not indicative of an overall 2x slowdown!
This patch (of 4):
Hook into vmalloc and vmap, and dynamically allocate real shadow memory
to back the mappings.
Most mappings in vmalloc space are small, requiring less than a full
page of shadow space. Allocating a full shadow page per mapping would
therefore be wasteful. Furthermore, to ensure that different mappings
use different shadow pages, mappings would have to be aligned to
KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE.
Instead, share backing space across multiple mappings. Allocate a
backing page when a mapping in vmalloc space uses a particular page of
the shadow region. This page can be shared by other vmalloc mappings
later on.
We hook in to the vmap infrastructure to lazily clean up unused shadow
memory.
To avoid the difficulties around swapping mappings around, this code
expects that the part of the shadow region that covers the vmalloc space
will not be covered by the early shadow page, but will be left unmapped.
This will require changes in arch-specific code.
This allows KASAN with VMAP_STACK, and may be helpful for architectures
that do not have a separate module space (e.g. powerpc64, which I am
currently working on). It also allows relaxing the module alignment
back to PAGE_SIZE.
Testing with test_vmalloc.sh on an x86 VM with 2 vCPUs shows that:
- Turning on KASAN, inline instrumentation, without vmalloc, introuduces
a 4.1x-4.2x slowdown in vmalloc operations.
- Turning this on introduces the following slowdowns over KASAN:
* ~1.76x slower single-threaded (test_vmalloc.sh performance)
* ~2.18x slower when both cpus are performing operations
simultaneously (test_vmalloc.sh sequential_test_order=3D1)
This is unfortunate but given that this is a debug feature only, not the
end of the world.
The full benchmark results are:
Performance
No KASAN KASAN original x baseline KASAN vmalloc x baseline x KASAN
fix_size_alloc_test 662004 11404956 17.23 19144610 28.92 1.68
full_fit_alloc_test 710950 12029752 16.92 13184651 18.55 1.10
long_busy_list_alloc_test 9431875 43990172 4.66 82970178 8.80 1.89
random_size_alloc_test 5033626 23061762 4.58 47158834 9.37 2.04
fix_align_alloc_test 1252514 15276910 12.20 31266116 24.96 2.05
random_size_align_alloc_te 1648501 14578321 8.84 25560052 15.51 1.75
align_shift_alloc_test 147 830 5.65 5692 38.72 6.86
pcpu_alloc_test 80732 125520 1.55 140864 1.74 1.12
Total Cycles 119240774314 763211341128 6.40 1390338696894 11.66 1.82
Sequential, 2 cpus
No KASAN KASAN original x baseline KASAN vmalloc x baseline x KASAN
fix_size_alloc_test 1423150 14276550 10.03 27733022 19.49 1.94
full_fit_alloc_test 1754219 14722640 8.39 15030786 8.57 1.02
long_busy_list_alloc_test 11451858 52154973 4.55 107016027 9.34 2.05
random_size_alloc_test 5989020 26735276 4.46 68885923 11.50 2.58
fix_align_alloc_test 2050976 20166900 9.83 50491675 24.62 2.50
random_size_align_alloc_te 2858229 17971700 6.29 38730225 13.55 2.16
align_shift_alloc_test 405 6428 15.87 26253 64.82 4.08
pcpu_alloc_test 127183 151464 1.19 216263 1.70 1.43
Total Cycles 54181269392 308723699764 5.70 650772566394 12.01 2.11
fix_size_alloc_test 1420404 14289308 10.06 27790035 19.56 1.94
full_fit_alloc_test 1736145 14806234 8.53 15274301 8.80 1.03
long_busy_list_alloc_test 11404638 52270785 4.58 107550254 9.43 2.06
random_size_alloc_test 6017006 26650625 4.43 68696127 11.42 2.58
fix_align_alloc_test 2045504 20280985 9.91 50414862 24.65 2.49
random_size_align_alloc_te 2845338 17931018 6.30 38510276 13.53 2.15
align_shift_alloc_test 472 3760 7.97 9656 20.46 2.57
pcpu_alloc_test 118643 132732 1.12 146504 1.23 1.10
Total Cycles 54040011688 309102805492 5.72 651325675652 12.05 2.11
[dja@axtens.net: fixups]
Link: http://lkml.kernel.org/r/20191120052719.7201-1-dja@axtens.net
Link: https://bugzilla.kernel.org/show_bug.cgi?id=3D202009
Link: http://lkml.kernel.org/r/20191031093909.9228-2-dja@axtens.net
Signed-off-by: Mark Rutland <mark.rutland@arm.com> [shadow rework]
Signed-off-by: Daniel Axtens <dja@axtens.net>
Co-developed-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2019-12-01 04:54:50 +03:00
case KASAN_VMALLOC_INVALID :
bug_type = " vmalloc-out-of-bounds " ;
break ;
2018-12-28 11:30:38 +03:00
}
return bug_type ;
}
static const char * get_wild_bug_type ( struct kasan_access_info * info )
{
const char * bug_type = " unknown-crash " ;
if ( ( unsigned long ) info - > access_addr < PAGE_SIZE )
bug_type = " null-ptr-deref " ;
else if ( ( unsigned long ) info - > access_addr < TASK_SIZE )
bug_type = " user-memory-access " ;
else
bug_type = " wild-memory-access " ;
return bug_type ;
}
2021-02-24 23:05:05 +03:00
const char * kasan_get_bug_type ( struct kasan_access_info * info )
2018-12-28 11:30:38 +03:00
{
2020-04-02 07:09:37 +03:00
/*
* If access_size is a negative number , then it has reason to be
* defined as out - of - bounds bug type .
*
* Casting negative numbers to size_t would indeed turn up as
* a large size_t and its value will be larger than ULONG_MAX / 2 ,
* so that this can qualify as out - of - bounds .
*/
if ( info - > access_addr + info - > access_size < info - > access_addr )
return " out-of-bounds " ;
2020-12-22 23:01:07 +03:00
if ( addr_has_metadata ( info - > access_addr ) )
2018-12-28 11:30:38 +03:00
return get_shadow_bug_type ( info ) ;
return get_wild_bug_type ( info ) ;
}
2021-02-24 23:05:05 +03:00
void kasan_metadata_fetch_row ( char * buffer , void * row )
2020-12-22 23:01:17 +03:00
{
memcpy ( buffer , kasan_mem_to_shadow ( row ) , META_BYTES_PER_ROW ) ;
}
2021-04-17 01:46:00 +03:00
# ifdef CONFIG_KASAN_STACK
2020-12-22 23:00:49 +03:00
static bool __must_check tokenize_frame_descr ( const char * * frame_descr ,
char * token , size_t max_tok_len ,
unsigned long * value )
{
const char * sep = strchr ( * frame_descr , ' ' ) ;
if ( sep = = NULL )
sep = * frame_descr + strlen ( * frame_descr ) ;
if ( token ! = NULL ) {
const size_t tok_len = sep - * frame_descr ;
if ( tok_len + 1 > max_tok_len ) {
pr_err ( " KASAN internal error: frame description too long: %s \n " ,
* frame_descr ) ;
return false ;
}
/* Copy token (+ 1 byte for '\0'). */
2021-04-30 08:59:43 +03:00
strscpy ( token , * frame_descr , tok_len + 1 ) ;
2020-12-22 23:00:49 +03:00
}
/* Advance frame_descr past separator. */
* frame_descr = sep + 1 ;
if ( value ! = NULL & & kstrtoul ( token , 10 , value ) ) {
pr_err ( " KASAN internal error: not a valid number: %s \n " , token ) ;
return false ;
}
return true ;
}
static void print_decoded_frame_descr ( const char * frame_descr )
{
/*
* We need to parse the following string :
* " n alloc_1 alloc_2 ... alloc_n "
* where alloc_i looks like
* " offset size len name "
* or " offset size len name:line " .
*/
char token [ 64 ] ;
unsigned long num_objects ;
if ( ! tokenize_frame_descr ( & frame_descr , token , sizeof ( token ) ,
& num_objects ) )
return ;
pr_err ( " \n " ) ;
pr_err ( " this frame has %lu %s: \n " , num_objects ,
num_objects = = 1 ? " object " : " objects " ) ;
while ( num_objects - - ) {
unsigned long offset ;
unsigned long size ;
/* access offset */
if ( ! tokenize_frame_descr ( & frame_descr , token , sizeof ( token ) ,
& offset ) )
return ;
/* access size */
if ( ! tokenize_frame_descr ( & frame_descr , token , sizeof ( token ) ,
& size ) )
return ;
/* name length (unused) */
if ( ! tokenize_frame_descr ( & frame_descr , NULL , 0 , NULL ) )
return ;
/* object name */
if ( ! tokenize_frame_descr ( & frame_descr , token , sizeof ( token ) ,
NULL ) )
return ;
/* Strip line number; without filename it's not very helpful. */
strreplace ( token , ' : ' , ' \0 ' ) ;
/* Finally, print object information. */
pr_err ( " [%lu, %lu) '%s' " , offset , offset + size , token ) ;
}
}
static bool __must_check get_address_stack_frame_info ( const void * addr ,
unsigned long * offset ,
const char * * frame_descr ,
const void * * frame_pc )
{
unsigned long aligned_addr ;
unsigned long mem_ptr ;
const u8 * shadow_bottom ;
const u8 * shadow_ptr ;
const unsigned long * frame ;
BUILD_BUG_ON ( IS_ENABLED ( CONFIG_STACK_GROWSUP ) ) ;
/*
* NOTE : We currently only support printing frame information for
* accesses to the task ' s own stack .
*/
if ( ! object_is_on_stack ( addr ) )
return false ;
aligned_addr = round_down ( ( unsigned long ) addr , sizeof ( long ) ) ;
mem_ptr = round_down ( aligned_addr , KASAN_GRANULE_SIZE ) ;
shadow_ptr = kasan_mem_to_shadow ( ( void * ) aligned_addr ) ;
shadow_bottom = kasan_mem_to_shadow ( end_of_stack ( current ) ) ;
while ( shadow_ptr > = shadow_bottom & & * shadow_ptr ! = KASAN_STACK_LEFT ) {
shadow_ptr - - ;
mem_ptr - = KASAN_GRANULE_SIZE ;
}
while ( shadow_ptr > = shadow_bottom & & * shadow_ptr = = KASAN_STACK_LEFT ) {
shadow_ptr - - ;
mem_ptr - = KASAN_GRANULE_SIZE ;
}
if ( shadow_ptr < shadow_bottom )
return false ;
frame = ( const unsigned long * ) ( mem_ptr + KASAN_GRANULE_SIZE ) ;
if ( frame [ 0 ] ! = KASAN_CURRENT_STACK_FRAME_MAGIC ) {
pr_err ( " KASAN internal error: frame info validation failed; invalid marker: %lu \n " ,
frame [ 0 ] ) ;
return false ;
}
* offset = ( unsigned long ) addr - ( unsigned long ) frame ;
* frame_descr = ( const char * ) frame [ 1 ] ;
* frame_pc = ( void * ) frame [ 2 ] ;
return true ;
}
2021-02-24 23:05:05 +03:00
void kasan_print_address_stack_frame ( const void * addr )
2020-12-22 23:00:49 +03:00
{
unsigned long offset ;
const char * frame_descr ;
const void * frame_pc ;
if ( ! get_address_stack_frame_info ( addr , & offset , & frame_descr ,
& frame_pc ) )
return ;
/*
* get_address_stack_frame_info only returns true if the given addr is
* on the current task ' s stack .
*/
pr_err ( " \n " ) ;
pr_err ( " addr %px is located in stack of task %s/%d at offset %lu in frame: \n " ,
addr , current - > comm , task_pid_nr ( current ) , offset ) ;
pr_err ( " %pS \n " , frame_pc ) ;
if ( ! frame_descr )
return ;
print_decoded_frame_descr ( frame_descr ) ;
}
# endif /* CONFIG_KASAN_STACK */
2018-12-28 11:30:38 +03:00
# define DEFINE_ASAN_REPORT_LOAD(size) \
void __asan_report_load # # size # # _noabort ( unsigned long addr ) \
{ \
kasan_report ( addr , size , false , _RET_IP_ ) ; \
} \
EXPORT_SYMBOL ( __asan_report_load # # size # # _noabort )
# define DEFINE_ASAN_REPORT_STORE(size) \
void __asan_report_store # # size # # _noabort ( unsigned long addr ) \
{ \
kasan_report ( addr , size , true , _RET_IP_ ) ; \
} \
EXPORT_SYMBOL ( __asan_report_store # # size # # _noabort )
DEFINE_ASAN_REPORT_LOAD ( 1 ) ;
DEFINE_ASAN_REPORT_LOAD ( 2 ) ;
DEFINE_ASAN_REPORT_LOAD ( 4 ) ;
DEFINE_ASAN_REPORT_LOAD ( 8 ) ;
DEFINE_ASAN_REPORT_LOAD ( 16 ) ;
DEFINE_ASAN_REPORT_STORE ( 1 ) ;
DEFINE_ASAN_REPORT_STORE ( 2 ) ;
DEFINE_ASAN_REPORT_STORE ( 4 ) ;
DEFINE_ASAN_REPORT_STORE ( 8 ) ;
DEFINE_ASAN_REPORT_STORE ( 16 ) ;
void __asan_report_load_n_noabort ( unsigned long addr , size_t size )
{
kasan_report ( addr , size , false , _RET_IP_ ) ;
}
EXPORT_SYMBOL ( __asan_report_load_n_noabort ) ;
void __asan_report_store_n_noabort ( unsigned long addr , size_t size )
{
kasan_report ( addr , size , true , _RET_IP_ ) ;
}
EXPORT_SYMBOL ( __asan_report_store_n_noabort ) ;