// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping effects of functions like memset() inside instrumented code.
 */

void kmsan_task_create(struct task_struct *task)
{
        kmsan_enter_runtime();
        kmsan_internal_task_create(task);
        kmsan_leave_runtime();
}

void kmsan_task_exit(struct task_struct *task)
{
        struct kmsan_ctx *ctx = &task->kmsan_ctx;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        ctx->allow_reporting = false;
}

void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
        if (unlikely(object == NULL))
                return;
        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        /*
         * There's a ctor or this is an RCU cache - do nothing. The memory
         * status hasn't changed since last use.
         */
        if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
                return;

        kmsan_enter_runtime();
        if (flags & __GFP_ZERO)
                kmsan_internal_unpoison_memory(object, s->object_size,
                                               KMSAN_POISON_CHECK);
        else
                kmsan_internal_poison_memory(object, s->object_size, flags,
                                             KMSAN_POISON_CHECK);
        kmsan_leave_runtime();
}
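
/*
 * Illustrative sketch (hypothetical cache "foo_cache", not a call site in
 * this file): whether a fresh object starts out poisoned depends on
 * __GFP_ZERO:
 *
 *      struct foo *a = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *      // 'a' is poisoned: reading it before initialization is reported.
 *      struct foo *b = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *      // 'b' is unpoisoned: kmem_cache_zalloc() implies __GFP_ZERO.
 */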

void kmsan_slab_free(struct kmem_cache *s, void *object)
{
        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
                return;
        /*
         * If there's a constructor, freed memory must remain in the same state
         * until the next allocation. We cannot save its state to detect
         * use-after-free bugs, instead we just keep it unpoisoned.
         */
        if (s->ctor)
                return;
        kmsan_enter_runtime();
        kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
}

void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        if (unlikely(ptr == NULL))
                return;
        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        kmsan_enter_runtime();
        if (flags & __GFP_ZERO)
                kmsan_internal_unpoison_memory((void *)ptr, size,
                                               /*checked*/ true);
        else
                kmsan_internal_poison_memory((void *)ptr, size, flags,
                                             KMSAN_POISON_CHECK);
        kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
        struct page *page;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        kmsan_enter_runtime();
        page = virt_to_head_page((void *)ptr);
        KMSAN_WARN_ON(ptr != page_address(page));
        kmsan_internal_poison_memory((void *)ptr, page_size(page), GFP_KERNEL,
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
}

static unsigned long vmalloc_shadow(unsigned long addr)
{
        return (unsigned long)kmsan_get_metadata((void *)addr,
                                                 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
        return (unsigned long)kmsan_get_metadata((void *)addr,
                                                 KMSAN_META_ORIGIN);
}

void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
        __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
        __vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
        flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
        flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
                             phys_addr_t phys_addr, pgprot_t prot,
                             unsigned int page_shift)
{
        gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
        struct page *shadow, *origin;
        unsigned long off = 0;
        int nr, err = 0, clean = 0, mapped;

        if (!kmsan_enabled || kmsan_in_runtime())
                return 0;

        nr = (end - start) / PAGE_SIZE;
        kmsan_enter_runtime();
        for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
                shadow = alloc_pages(gfp_mask, 1);
                origin = alloc_pages(gfp_mask, 1);
                if (!shadow || !origin) {
                        err = -ENOMEM;
                        goto ret;
                }
                mapped = __vmap_pages_range_noflush(
                        vmalloc_shadow(start + off),
                        vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
                        PAGE_SHIFT);
                if (mapped) {
                        err = mapped;
                        goto ret;
                }
                shadow = NULL;
                mapped = __vmap_pages_range_noflush(
                        vmalloc_origin(start + off),
                        vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
                        PAGE_SHIFT);
                if (mapped) {
                        __vunmap_range_noflush(
                                vmalloc_shadow(start + off),
                                vmalloc_shadow(start + off + PAGE_SIZE));
                        err = mapped;
                        goto ret;
                }
                origin = NULL;
        }
        /* Page mapping loop finished normally, nothing to clean up. */
        clean = 0;
ret:
        if (clean > 0) {
                /*
                 * Something went wrong. Clean up shadow/origin pages allocated
                 * on the last loop iteration, then delete mappings created
                 * during the previous iterations.
                 */
                if (shadow)
                        __free_pages(shadow, 1);
                if (origin)
                        __free_pages(origin, 1);
                __vunmap_range_noflush(
                        vmalloc_shadow(start),
                        vmalloc_shadow(start + clean * PAGE_SIZE));
                __vunmap_range_noflush(
                        vmalloc_origin(start),
                        vmalloc_origin(start + clean * PAGE_SIZE));
        }
        flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
        flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
        kmsan_leave_runtime();
        return err;
}

void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
        unsigned long v_shadow, v_origin;
        struct page *shadow, *origin;
        int nr;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        nr = (end - start) / PAGE_SIZE;
        kmsan_enter_runtime();
        v_shadow = (unsigned long)vmalloc_shadow(start);
        v_origin = (unsigned long)vmalloc_origin(start);
        for (int i = 0; i < nr;
             i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
                shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
                origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
                __vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
                __vunmap_range_noflush(v_origin, vmalloc_origin(end));
                if (shadow)
                        __free_pages(shadow, 1);
                if (origin)
                        __free_pages(origin, 1);
        }
        flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
        flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
        kmsan_leave_runtime();
}

void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
                        size_t left)
{
        unsigned long ua_flags;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        /*
         * At this point we've copied the memory already. It's hard to check it
         * before copying, as the size of actually copied buffer is unknown.
         */

        /* copy_to_user() may copy zero bytes. No need to check. */
        if (!to_copy)
                return;
        /* Or maybe copy_to_user() failed to copy anything. */
        if (to_copy <= left)
                return;

        ua_flags = user_access_save();
        if ((u64)to < TASK_SIZE) {
                /* This is a user memory access, check it. */
                kmsan_internal_check_memory((void *)from, to_copy - left, to,
                                            REASON_COPY_TO_USER);
        } else {
                /*
                 * Otherwise this is a kernel memory access. This happens when
                 * a compat syscall passes an argument allocated on the kernel
                 * stack to a real syscall.
                 * Don't check anything, just copy the shadow of the copied
                 * bytes.
                 */
                kmsan_internal_memmove_metadata((void *)to, (void *)from,
                                                to_copy - left);
        }
        user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);
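
/*
 * Illustrative sketch (not an actual call site): a copy routine is expected
 * to report both the requested size and the number of bytes left uncopied,
 * so that only the bytes actually transferred are checked:
 *
 *      unsigned long left = copy_to_user(to, from, len);
 *
 *      kmsan_copy_to_user(to, from, len, left);
 */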

/* Helper function to check an URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
        if (!urb)
                return;
        if (is_out)
                kmsan_internal_check_memory(urb->transfer_buffer,
                                            urb->transfer_buffer_length,
                                            /*user_addr*/ 0,
                                            REASON_SUBMIT_URB);
        else
                kmsan_internal_unpoison_memory(urb->transfer_buffer,
                                               urb->transfer_buffer_length,
                                               /*checked*/ false);
}
EXPORT_SYMBOL_GPL(kmsan_handle_urb);
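
/*
 * Simplified usage sketch (not the actual USB core code): the USB core is
 * expected to call this helper when an URB is submitted, with the direction
 * derived from the endpoint, e.g.:
 *
 *      kmsan_handle_urb(urb, usb_urb_dir_out(urb));
 */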

static void kmsan_handle_dma_page(const void *addr, size_t size,
                                  enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
                kmsan_internal_check_memory((void *)addr, size,
                                            /*user_addr*/ 0, REASON_ANY);
                kmsan_internal_unpoison_memory((void *)addr, size,
                                               /*checked*/ false);
                break;
        case DMA_TO_DEVICE:
                kmsan_internal_check_memory((void *)addr, size,
                                            /*user_addr*/ 0, REASON_ANY);
                break;
        case DMA_FROM_DEVICE:
                kmsan_internal_unpoison_memory((void *)addr, size,
                                               /*checked*/ false);
                break;
        case DMA_NONE:
                break;
        }
}

/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
                      enum dma_data_direction dir)
{
        u64 page_offset, to_go, addr;

        if (PageHighMem(page))
                return;
        addr = (u64)page_address(page) + offset;
        /*
         * The kernel may occasionally give us adjacent DMA pages not belonging
         * to the same allocation. Process them separately to avoid triggering
         * internal KMSAN checks.
         */
        while (size > 0) {
                page_offset = offset_in_page(addr);
                to_go = min(PAGE_SIZE - page_offset, (u64)size);
                kmsan_handle_dma_page((void *)addr, to_go, dir);
                addr += to_go;
                size -= to_go;
        }
}
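
/*
 * Worked example (assuming PAGE_SIZE == 0x1000): for an addr ending in 0xf00
 * and size == 0x300, the loop above issues two calls:
 *
 *      kmsan_handle_dma_page(addr, 0x100, dir);         // up to page boundary
 *      kmsan_handle_dma_page(addr + 0x100, 0x200, dir); // the remainder
 */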

void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
                         enum dma_data_direction dir)
{
        struct scatterlist *item;
        int i;

        for_each_sg(sg, item, nents, i)
                kmsan_handle_dma(sg_page(item), item->offset, item->length,
                                 dir);
}

/* Functions from kmsan-checks.h follow. */

/*
 * To create an origin, kmsan_poison_memory() unwinds the stacks and stores it
 * into the stack depot. This may cause deadlocks if done from within KMSAN
 * runtime, therefore we bail out if kmsan_in_runtime().
 */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        kmsan_enter_runtime();
        /* The users may want to poison/unpoison random memory. */
        kmsan_internal_poison_memory((void *)address, size, flags,
                                     KMSAN_POISON_NOCHECK);
        kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);
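
/*
 * Usage sketch (hypothetical driver code): a driver returning a buffer "buf"
 * of "len" bytes to a private pool can mark it uninitialized again, so that
 * later reads of stale data are reported:
 *
 *      kmsan_poison_memory(buf, len, GFP_KERNEL);
 */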

/*
 * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
 * runtime, because it does not trigger allocations or call instrumented code.
 */
void kmsan_unpoison_memory(const void *address, size_t size)
{
        unsigned long ua_flags;

        if (!kmsan_enabled)
                return;

        ua_flags = user_access_save();
        /* The users may want to poison/unpoison random memory. */
        kmsan_internal_unpoison_memory((void *)address, size,
                                       KMSAN_POISON_NOCHECK);
        user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
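
/*
 * Usage sketch (hypothetical driver code): after a device fills a buffer by
 * means invisible to KMSAN (e.g. DMA that bypassed the hooks above), the
 * driver can mark the data as initialized:
 *
 *      kmsan_unpoison_memory(buf, len);
 */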

/*
 * Version of kmsan_unpoison_memory() called from IRQ entry functions.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
        kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}

void kmsan_check_memory(const void *addr, size_t size)
{
        if (!kmsan_enabled)
                return;
        return kmsan_internal_check_memory((void *)addr, size,
                                           /*user_addr*/ 0, REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);
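
/*
 * Usage sketch (hypothetical driver code): checking a buffer right before
 * handing it to hardware reports any uninitialized bytes it contains:
 *
 *      kmsan_check_memory(buf, len);
 */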