// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * test_kprobes.c - simple sanity test for *probes
 *
 * Copyright IBM Corp. 2008
 */
# include <linux/kernel.h>
# include <linux/kprobes.h>
# include <linux/random.h>
2021-10-21 03:54:24 +03:00
# include <kunit/test.h>
2008-01-30 15:32:53 +03:00
# define div_factor 3
2017-10-06 02:15:17 +03:00
static u32 rand1 , preh_val , posth_val ;
2009-01-07 01:41:47 +03:00
static u32 ( * target ) ( u32 value ) ;
2023-02-21 02:52:42 +03:00
static u32 ( * recursed_target ) ( u32 value ) ;
2009-01-07 01:41:48 +03:00
static u32 ( * target2 ) ( u32 value ) ;
2021-10-21 03:54:24 +03:00
static struct kunit * current_test ;
2008-01-30 15:32:53 +03:00
2021-10-25 14:41:52 +03:00
static unsigned long ( * internal_target ) ( void ) ;
static unsigned long ( * stacktrace_target ) ( void ) ;
static unsigned long ( * stacktrace_driver ) ( void ) ;
static unsigned long target_return_address [ 2 ] ;
2008-01-30 15:32:53 +03:00
static noinline u32 kprobe_target ( u32 value )
{
return ( value / div_factor ) ;
}
2023-02-21 02:52:42 +03:00
static noinline u32 kprobe_recursed_target ( u32 value )
{
return ( value / div_factor ) ;
}
2008-01-30 15:32:53 +03:00
static int kp_pre_handler ( struct kprobe * p , struct pt_regs * regs )
{
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_FALSE ( current_test , preemptible ( ) ) ;
2023-02-21 02:52:42 +03:00
preh_val = recursed_target ( rand1 ) ;
2008-01-30 15:32:53 +03:00
return 0 ;
}
static void kp_post_handler ( struct kprobe * p , struct pt_regs * regs ,
unsigned long flags )
{
2023-02-21 02:52:42 +03:00
u32 expval = recursed_target ( rand1 ) ;
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_FALSE ( current_test , preemptible ( ) ) ;
2023-02-21 02:52:42 +03:00
KUNIT_EXPECT_EQ ( current_test , preh_val , expval ) ;
2008-01-30 15:32:53 +03:00
posth_val = preh_val + div_factor ;
}
static struct kprobe kp = {
. symbol_name = " kprobe_target " ,
. pre_handler = kp_pre_handler ,
. post_handler = kp_post_handler
} ;
2021-10-21 03:54:24 +03:00
static void test_kprobe ( struct kunit * test )
2008-01-30 15:32:53 +03:00
{
2021-10-21 03:54:24 +03:00
current_test = test ;
KUNIT_EXPECT_EQ ( test , 0 , register_kprobe ( & kp ) ) ;
target ( rand1 ) ;
2008-01-30 15:32:53 +03:00
unregister_kprobe ( & kp ) ;
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_NE ( test , 0 , preh_val ) ;
KUNIT_EXPECT_NE ( test , 0 , posth_val ) ;
2008-01-30 15:32:53 +03:00
}
2009-01-07 01:41:48 +03:00
static noinline u32 kprobe_target2 ( u32 value )
{
return ( value / div_factor ) + 1 ;
}
2021-10-25 14:41:52 +03:00
static noinline unsigned long kprobe_stacktrace_internal_target ( void )
{
if ( ! target_return_address [ 0 ] )
target_return_address [ 0 ] = ( unsigned long ) __builtin_return_address ( 0 ) ;
return target_return_address [ 0 ] ;
}
static noinline unsigned long kprobe_stacktrace_target ( void )
{
if ( ! target_return_address [ 1 ] )
target_return_address [ 1 ] = ( unsigned long ) __builtin_return_address ( 0 ) ;
if ( internal_target )
internal_target ( ) ;
return target_return_address [ 1 ] ;
}
static noinline unsigned long kprobe_stacktrace_driver ( void )
{
if ( stacktrace_target )
stacktrace_target ( ) ;
/* This is for preventing inlining the function */
return ( unsigned long ) __builtin_return_address ( 0 ) ;
}
2009-01-07 01:41:48 +03:00
static int kp_pre_handler2 ( struct kprobe * p , struct pt_regs * regs )
{
preh_val = ( rand1 / div_factor ) + 1 ;
return 0 ;
}
static void kp_post_handler2 ( struct kprobe * p , struct pt_regs * regs ,
unsigned long flags )
{
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_EQ ( current_test , preh_val , ( rand1 / div_factor ) + 1 ) ;
2009-01-07 01:41:48 +03:00
posth_val = preh_val + div_factor ;
}
static struct kprobe kp2 = {
. symbol_name = " kprobe_target2 " ,
. pre_handler = kp_pre_handler2 ,
. post_handler = kp_post_handler2
} ;
2021-10-21 03:54:24 +03:00
static void test_kprobes ( struct kunit * test )
2009-01-07 01:41:48 +03:00
{
struct kprobe * kps [ 2 ] = { & kp , & kp2 } ;
2021-10-21 03:54:24 +03:00
current_test = test ;
2010-10-14 07:10:24 +04:00
/* addr and flags should be cleard for reusing kprobe. */
kp . addr = NULL ;
kp . flags = 0 ;
2009-01-07 01:41:48 +03:00
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_EQ ( test , 0 , register_kprobes ( kps , 2 ) ) ;
2009-01-07 01:41:48 +03:00
preh_val = 0 ;
posth_val = 0 ;
2021-10-21 03:54:24 +03:00
target ( rand1 ) ;
2009-01-07 01:41:48 +03:00
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_NE ( test , 0 , preh_val ) ;
KUNIT_EXPECT_NE ( test , 0 , posth_val ) ;
2009-01-07 01:41:48 +03:00
preh_val = 0 ;
posth_val = 0 ;
2021-10-21 03:54:24 +03:00
target2 ( rand1 ) ;
2009-01-07 01:41:48 +03:00
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_NE ( test , 0 , preh_val ) ;
KUNIT_EXPECT_NE ( test , 0 , posth_val ) ;
2009-01-07 01:41:48 +03:00
unregister_kprobes ( kps , 2 ) ;
}
2023-02-21 02:52:42 +03:00
static struct kprobe kp_missed = {
. symbol_name = " kprobe_recursed_target " ,
. pre_handler = kp_pre_handler ,
. post_handler = kp_post_handler ,
} ;
static void test_kprobe_missed ( struct kunit * test )
{
current_test = test ;
preh_val = 0 ;
posth_val = 0 ;
KUNIT_EXPECT_EQ ( test , 0 , register_kprobe ( & kp_missed ) ) ;
recursed_target ( rand1 ) ;
KUNIT_EXPECT_EQ ( test , 2 , kp_missed . nmissed ) ;
KUNIT_EXPECT_NE ( test , 0 , preh_val ) ;
KUNIT_EXPECT_NE ( test , 0 , posth_val ) ;
unregister_kprobe ( & kp_missed ) ;
}
2008-01-30 15:32:53 +03:00
# ifdef CONFIG_KRETPROBES
static u32 krph_val ;
2008-02-06 12:38:22 +03:00
static int entry_handler ( struct kretprobe_instance * ri , struct pt_regs * regs )
{
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_FALSE ( current_test , preemptible ( ) ) ;
2008-02-06 12:38:22 +03:00
krph_val = ( rand1 / div_factor ) ;
return 0 ;
}
2008-01-30 15:32:53 +03:00
static int return_handler ( struct kretprobe_instance * ri , struct pt_regs * regs )
{
unsigned long ret = regs_return_value ( regs ) ;
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_FALSE ( current_test , preemptible ( ) ) ;
KUNIT_EXPECT_EQ ( current_test , ret , rand1 / div_factor ) ;
KUNIT_EXPECT_NE ( current_test , krph_val , 0 ) ;
2008-02-06 12:38:22 +03:00
krph_val = rand1 ;
2008-01-30 15:32:53 +03:00
return 0 ;
}
static struct kretprobe rp = {
. handler = return_handler ,
2008-02-06 12:38:22 +03:00
. entry_handler = entry_handler ,
2008-01-30 15:32:53 +03:00
. kp . symbol_name = " kprobe_target "
} ;
2021-10-21 03:54:24 +03:00
static void test_kretprobe ( struct kunit * test )
2008-01-30 15:32:53 +03:00
{
2021-10-21 03:54:24 +03:00
current_test = test ;
KUNIT_EXPECT_EQ ( test , 0 , register_kretprobe ( & rp ) ) ;
target ( rand1 ) ;
2008-01-30 15:32:53 +03:00
unregister_kretprobe ( & rp ) ;
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_EQ ( test , krph_val , rand1 ) ;
2008-01-30 15:32:53 +03:00
}
2009-01-07 01:41:48 +03:00
static int return_handler2 ( struct kretprobe_instance * ri , struct pt_regs * regs )
{
unsigned long ret = regs_return_value ( regs ) ;
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_EQ ( current_test , ret , ( rand1 / div_factor ) + 1 ) ;
KUNIT_EXPECT_NE ( current_test , krph_val , 0 ) ;
2009-01-07 01:41:48 +03:00
krph_val = rand1 ;
return 0 ;
}
static struct kretprobe rp2 = {
. handler = return_handler2 ,
. entry_handler = entry_handler ,
. kp . symbol_name = " kprobe_target2 "
} ;
2021-10-21 03:54:24 +03:00
static void test_kretprobes ( struct kunit * test )
2009-01-07 01:41:48 +03:00
{
struct kretprobe * rps [ 2 ] = { & rp , & rp2 } ;
2021-10-21 03:54:24 +03:00
current_test = test ;
2010-10-14 07:10:24 +04:00
/* addr and flags should be cleard for reusing kprobe. */
rp . kp . addr = NULL ;
rp . kp . flags = 0 ;
2021-10-21 03:54:24 +03:00
KUNIT_EXPECT_EQ ( test , 0 , register_kretprobes ( rps , 2 ) ) ;
2009-01-07 01:41:48 +03:00
krph_val = 0 ;
2021-10-21 03:54:24 +03:00
target ( rand1 ) ;
KUNIT_EXPECT_EQ ( test , krph_val , rand1 ) ;
2009-01-07 01:41:48 +03:00
krph_val = 0 ;
2021-10-21 03:54:24 +03:00
target2 ( rand1 ) ;
KUNIT_EXPECT_EQ ( test , krph_val , rand1 ) ;
2009-01-07 01:41:48 +03:00
unregister_kretprobes ( rps , 2 ) ;
}
2021-10-25 14:41:52 +03:00
# ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
# define STACK_BUF_SIZE 16
static unsigned long stack_buf [ STACK_BUF_SIZE ] ;
static int stacktrace_return_handler ( struct kretprobe_instance * ri , struct pt_regs * regs )
{
unsigned long retval = regs_return_value ( regs ) ;
int i , ret ;
KUNIT_EXPECT_FALSE ( current_test , preemptible ( ) ) ;
KUNIT_EXPECT_EQ ( current_test , retval , target_return_address [ 1 ] ) ;
/*
* Test stacktrace inside the kretprobe handler , this will involves
* kretprobe trampoline , but must include correct return address
* of the target function .
*/
ret = stack_trace_save ( stack_buf , STACK_BUF_SIZE , 0 ) ;
KUNIT_EXPECT_NE ( current_test , ret , 0 ) ;
for ( i = 0 ; i < ret ; i + + ) {
if ( stack_buf [ i ] = = target_return_address [ 1 ] )
break ;
}
KUNIT_EXPECT_NE ( current_test , i , ret ) ;
# if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
/*
* Test stacktrace from pt_regs at the return address . Thus the stack
* trace must start from the target return address .
*/
ret = stack_trace_save_regs ( regs , stack_buf , STACK_BUF_SIZE , 0 ) ;
KUNIT_EXPECT_NE ( current_test , ret , 0 ) ;
KUNIT_EXPECT_EQ ( current_test , stack_buf [ 0 ] , target_return_address [ 1 ] ) ;
# endif
return 0 ;
}
static struct kretprobe rp3 = {
. handler = stacktrace_return_handler ,
. kp . symbol_name = " kprobe_stacktrace_target "
} ;
static void test_stacktrace_on_kretprobe ( struct kunit * test )
{
unsigned long myretaddr = ( unsigned long ) __builtin_return_address ( 0 ) ;
current_test = test ;
rp3 . kp . addr = NULL ;
rp3 . kp . flags = 0 ;
/*
* Run the stacktrace_driver ( ) to record correct return address in
* stacktrace_target ( ) and ensure stacktrace_driver ( ) call is not
* inlined by checking the return address of stacktrace_driver ( )
* and the return address of this function is different .
*/
KUNIT_ASSERT_NE ( test , myretaddr , stacktrace_driver ( ) ) ;
KUNIT_ASSERT_EQ ( test , 0 , register_kretprobe ( & rp3 ) ) ;
KUNIT_ASSERT_NE ( test , myretaddr , stacktrace_driver ( ) ) ;
unregister_kretprobe ( & rp3 ) ;
}
static int stacktrace_internal_return_handler ( struct kretprobe_instance * ri , struct pt_regs * regs )
{
unsigned long retval = regs_return_value ( regs ) ;
int i , ret ;
KUNIT_EXPECT_FALSE ( current_test , preemptible ( ) ) ;
KUNIT_EXPECT_EQ ( current_test , retval , target_return_address [ 0 ] ) ;
/*
* Test stacktrace inside the kretprobe handler for nested case .
* The unwinder will find the kretprobe_trampoline address on the
* return address , and kretprobe must solve that .
*/
ret = stack_trace_save ( stack_buf , STACK_BUF_SIZE , 0 ) ;
KUNIT_EXPECT_NE ( current_test , ret , 0 ) ;
for ( i = 0 ; i < ret - 1 ; i + + ) {
if ( stack_buf [ i ] = = target_return_address [ 0 ] ) {
KUNIT_EXPECT_EQ ( current_test , stack_buf [ i + 1 ] , target_return_address [ 1 ] ) ;
break ;
}
}
KUNIT_EXPECT_NE ( current_test , i , ret ) ;
# if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
/* Ditto for the regs version. */
ret = stack_trace_save_regs ( regs , stack_buf , STACK_BUF_SIZE , 0 ) ;
KUNIT_EXPECT_NE ( current_test , ret , 0 ) ;
KUNIT_EXPECT_EQ ( current_test , stack_buf [ 0 ] , target_return_address [ 0 ] ) ;
KUNIT_EXPECT_EQ ( current_test , stack_buf [ 1 ] , target_return_address [ 1 ] ) ;
# endif
return 0 ;
}
static struct kretprobe rp4 = {
. handler = stacktrace_internal_return_handler ,
. kp . symbol_name = " kprobe_stacktrace_internal_target "
} ;
static void test_stacktrace_on_nested_kretprobe ( struct kunit * test )
{
unsigned long myretaddr = ( unsigned long ) __builtin_return_address ( 0 ) ;
struct kretprobe * rps [ 2 ] = { & rp3 , & rp4 } ;
current_test = test ;
rp3 . kp . addr = NULL ;
rp3 . kp . flags = 0 ;
//KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
KUNIT_ASSERT_EQ ( test , 0 , register_kretprobes ( rps , 2 ) ) ;
KUNIT_ASSERT_NE ( test , myretaddr , stacktrace_driver ( ) ) ;
unregister_kretprobes ( rps , 2 ) ;
}
# endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
2008-01-30 15:32:53 +03:00
# endif /* CONFIG_KRETPROBES */
2021-10-21 03:54:24 +03:00
static int kprobes_test_init ( struct kunit * test )
2008-01-30 15:32:53 +03:00
{
2009-01-07 01:41:47 +03:00
target = kprobe_target ;
2009-01-07 01:41:48 +03:00
target2 = kprobe_target2 ;
2023-02-21 02:52:42 +03:00
recursed_target = kprobe_recursed_target ;
2021-10-25 14:41:52 +03:00
stacktrace_target = kprobe_stacktrace_target ;
internal_target = kprobe_stacktrace_internal_target ;
stacktrace_driver = kprobe_stacktrace_driver ;
2022-10-10 05:44:02 +03:00
rand1 = get_random_u32_above ( div_factor ) ;
2021-10-21 03:54:24 +03:00
return 0 ;
}
2008-01-30 15:32:53 +03:00
2021-10-21 03:54:24 +03:00
static struct kunit_case kprobes_testcases [ ] = {
KUNIT_CASE ( test_kprobe ) ,
KUNIT_CASE ( test_kprobes ) ,
2023-02-21 02:52:42 +03:00
KUNIT_CASE ( test_kprobe_missed ) ,
2008-01-30 15:32:53 +03:00
# ifdef CONFIG_KRETPROBES
2021-10-21 03:54:24 +03:00
KUNIT_CASE ( test_kretprobe ) ,
KUNIT_CASE ( test_kretprobes ) ,
2021-10-25 14:41:52 +03:00
# ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
KUNIT_CASE ( test_stacktrace_on_kretprobe ) ,
KUNIT_CASE ( test_stacktrace_on_nested_kretprobe ) ,
# endif
2021-10-21 03:54:24 +03:00
# endif
{ }
} ;
2008-01-30 15:32:53 +03:00
2021-10-21 03:54:24 +03:00
static struct kunit_suite kprobes_test_suite = {
. name = " kprobes_test " ,
. init = kprobes_test_init ,
. test_cases = kprobes_testcases ,
} ;
2008-01-30 15:32:53 +03:00
2021-10-21 03:54:24 +03:00
kunit_test_suites ( & kprobes_test_suite ) ;
MODULE_LICENSE ( " GPL " ) ;