// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
*/

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}
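
/* Record the current task's pid and stack trace in an alloc/free track. */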
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags, true);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}
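
/*
 * Tag every page of a newly allocated high-order block with the same random
 * tag and unpoison the whole range so that it can be accessed.
 */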
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
*/
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
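
/*
 * For example, optimal_redzone(32) is 16 (32 fits in 64 - 16), while
 * optimal_redzone(128) is 64, since 128 no longer fits in 128 - 32.
 */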

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}
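
/* Return how many bytes of KASAN metadata are added to each object's redzone. */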
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif
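
/*
 * Poison a freshly allocated slab page: reset the per-page KASAN tags and
 * mark the whole page range as a redzone until objects are handed out.
 */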
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}
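
/*
 * Called when a slab object is first created: wipe its alloc metadata and
 * preassign a tag (only meaningful for caches with a constructor or
 * SLAB_TYPESAFE_BY_RCU; other caches get tagged at allocation time).
 */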
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}
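
/*
 * Common slab free hook: check for invalid and double frees, poison the
 * freed object, and decide whether it should go into the quarantine
 * (generic mode) instead of being returned to the allocator immediately.
 */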
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	u8 tag;
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_FREE, init);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}
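
/*
 * Check a page-backed (non-slab) allocation before it is freed: report an
 * invalid free if the pointer does not point to the start of the underlying
 * page or if the memory is not accessible.
 */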
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_poison_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct folio *folio;

	folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
	} else {
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
				gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}
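
/*
 * Poison the redzone between @size and the cache's object_size and record
 * the allocation stack. The object itself has already been unpoisoned by
 * kasan_slab_alloc() or kasan_krealloc().
 */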
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
*/

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_unpoison_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}
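
/*
 * krealloc() hook: unpoison the object up to its new size, then reuse the
 * kmalloc() hooks to poison the fresh redzone and refresh the alloc info.
 */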
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}
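
/*
 * Check that a single byte at @address is valid to access; report a KASAN
 * error and return false if it is not.
 */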
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}