2019-06-04 10:11:33 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2014-04-30 18:54:33 +09:00
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
# include <linux/ftrace.h>
2017-06-06 17:00:22 +00:00
# include <linux/module.h>
2014-04-30 18:54:33 +09:00
# include <linux/swab.h>
# include <linux/uaccess.h>
# include <asm/cacheflush.h>
2017-06-06 17:00:22 +00:00
# include <asm/debug-monitors.h>
2014-04-30 18:54:33 +09:00
# include <asm/ftrace.h>
# include <asm/insn.h>
2014-04-30 10:54:34 +01:00
# ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction currently at @pc must equal @old,
 * otherwise nothing is patched and -EINVAL is returned.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}
/*
 * Replace the tracer function invoked from ftrace_caller() with @func,
 * by patching the 'bl' at ftrace_call to branch-and-link to @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc = (unsigned long)&ftrace_call;
	u32 new;

	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	/* No validation: the old instruction varies with the previous tracer. */
	return ftrace_modify_code(pc, 0, new, false);
}
arm64: implement ftrace with regs
This patch implements FTRACE_WITH_REGS for arm64, which allows a traced
function's arguments (and some other registers) to be captured into a
struct pt_regs, allowing these to be inspected and/or modified. This is
a building block for live-patching, where a function's arguments may be
forwarded to another function. This is also necessary to enable ftrace
and in-kernel pointer authentication at the same time, as it allows the
LR value to be captured and adjusted prior to signing.
Using GCC's -fpatchable-function-entry=N option, we can have the
compiler insert a configurable number of NOPs between the function entry
point and the usual prologue. This also ensures functions are AAPCS
compliant (e.g. disabling inter-procedural register allocation).
For example, with -fpatchable-function-entry=2, GCC 8.1.0 compiles the
following:
| unsigned long bar(void);
|
| unsigned long foo(void)
| {
| return bar() + 1;
| }
... to:
| <foo>:
| nop
| nop
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl 0 <bar>
| add x0, x0, #0x1
| ldp x29, x30, [sp], #16
| ret
This patch builds the kernel with -fpatchable-function-entry=2,
prefixing each function with two NOPs. To trace a function, we replace
these NOPs with a sequence that saves the LR into a GPR, then calls an
ftrace entry assembly function which saves this and other relevant
registers:
| mov x9, x30
| bl <ftrace-entry>
Since patchable functions are AAPCS compliant (and the kernel does not
use x18 as a platform register), x9-x18 can be safely clobbered in the
patched sequence and the ftrace entry code.
There are now two ftrace entry functions, ftrace_regs_entry (which saves
all GPRs), and ftrace_entry (which saves the bare minimum). A PLT is
allocated for each within modules.
Signed-off-by: Torsten Duwe <duwe@suse.de>
[Mark: rework asm, comments, PLTs, initialization, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Torsten Duwe <duwe@suse.de>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Tested-by: Torsten Duwe <duwe@suse.de>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Julien Thierry <jthierry@redhat.com>
Cc: Will Deacon <will@kernel.org>
2019-02-08 16:10:19 +01:00
static struct plt_entry * get_ftrace_plt ( struct module * mod , unsigned long addr )
{
2019-10-21 15:05:52 +01:00
# ifdef CONFIG_ARM64_MODULE_PLTS
arm64: implement ftrace with regs
This patch implements FTRACE_WITH_REGS for arm64, which allows a traced
function's arguments (and some other registers) to be captured into a
struct pt_regs, allowing these to be inspected and/or modified. This is
a building block for live-patching, where a function's arguments may be
forwarded to another function. This is also necessary to enable ftrace
and in-kernel pointer authentication at the same time, as it allows the
LR value to be captured and adjusted prior to signing.
Using GCC's -fpatchable-function-entry=N option, we can have the
compiler insert a configurable number of NOPs between the function entry
point and the usual prologue. This also ensures functions are AAPCS
compliant (e.g. disabling inter-procedural register allocation).
For example, with -fpatchable-function-entry=2, GCC 8.1.0 compiles the
following:
| unsigned long bar(void);
|
| unsigned long foo(void)
| {
| return bar() + 1;
| }
... to:
| <foo>:
| nop
| nop
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl 0 <bar>
| add x0, x0, #0x1
| ldp x29, x30, [sp], #16
| ret
This patch builds the kernel with -fpatchable-function-entry=2,
prefixing each function with two NOPs. To trace a function, we replace
these NOPs with a sequence that saves the LR into a GPR, then calls an
ftrace entry assembly function which saves this and other relevant
registers:
| mov x9, x30
| bl <ftrace-entry>
Since patchable functions are AAPCS compliant (and the kernel does not
use x18 as a platform register), x9-x18 can be safely clobbered in the
patched sequence and the ftrace entry code.
There are now two ftrace entry functions, ftrace_regs_entry (which saves
all GPRs), and ftrace_entry (which saves the bare minimum). A PLT is
allocated for each within modules.
Signed-off-by: Torsten Duwe <duwe@suse.de>
[Mark: rework asm, comments, PLTs, initialization, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Torsten Duwe <duwe@suse.de>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Tested-by: Torsten Duwe <duwe@suse.de>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Julien Thierry <jthierry@redhat.com>
Cc: Will Deacon <will@kernel.org>
2019-02-08 16:10:19 +01:00
struct plt_entry * plt = mod - > arch . ftrace_trampolines ;
if ( addr = = FTRACE_ADDR )
return & plt [ FTRACE_PLT_IDX ] ;
if ( addr = = FTRACE_REGS_ADDR & & IS_ENABLED ( CONFIG_FTRACE_WITH_REGS ) )
return & plt [ FTRACE_REGS_PLT_IDX ] ;
2019-10-21 15:05:52 +01:00
# endif
arm64: implement ftrace with regs
This patch implements FTRACE_WITH_REGS for arm64, which allows a traced
function's arguments (and some other registers) to be captured into a
struct pt_regs, allowing these to be inspected and/or modified. This is
a building block for live-patching, where a function's arguments may be
forwarded to another function. This is also necessary to enable ftrace
and in-kernel pointer authentication at the same time, as it allows the
LR value to be captured and adjusted prior to signing.
Using GCC's -fpatchable-function-entry=N option, we can have the
compiler insert a configurable number of NOPs between the function entry
point and the usual prologue. This also ensures functions are AAPCS
compliant (e.g. disabling inter-procedural register allocation).
For example, with -fpatchable-function-entry=2, GCC 8.1.0 compiles the
following:
| unsigned long bar(void);
|
| unsigned long foo(void)
| {
| return bar() + 1;
| }
... to:
| <foo>:
| nop
| nop
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl 0 <bar>
| add x0, x0, #0x1
| ldp x29, x30, [sp], #16
| ret
This patch builds the kernel with -fpatchable-function-entry=2,
prefixing each function with two NOPs. To trace a function, we replace
these NOPs with a sequence that saves the LR into a GPR, then calls an
ftrace entry assembly function which saves this and other relevant
registers:
| mov x9, x30
| bl <ftrace-entry>
Since patchable functions are AAPCS compliant (and the kernel does not
use x18 as a platform register), x9-x18 can be safely clobbered in the
patched sequence and the ftrace entry code.
There are now two ftrace entry functions, ftrace_regs_entry (which saves
all GPRs), and ftrace_entry (which saves the bare minimum). A PLT is
allocated for each within modules.
Signed-off-by: Torsten Duwe <duwe@suse.de>
[Mark: rework asm, comments, PLTs, initialization, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Torsten Duwe <duwe@suse.de>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Tested-by: Torsten Duwe <duwe@suse.de>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Julien Thierry <jthierry@redhat.com>
Cc: Will Deacon <will@kernel.org>
2019-02-08 16:10:19 +01:00
return NULL ;
}
2014-04-30 10:54:34 +01:00
/*
* Turn on the call to ftrace_caller ( ) in instrumented function
*/
int ftrace_make_call ( struct dyn_ftrace * rec , unsigned long addr )
{
unsigned long pc = rec - > ip ;
u32 old , new ;
2017-06-12 14:43:25 +01:00
long offset = ( long ) pc - ( long ) addr ;
if ( offset < - SZ_128M | | offset > = SZ_128M ) {
2017-06-06 17:00:22 +00:00
struct module * mod ;
arm64: implement ftrace with regs
This patch implements FTRACE_WITH_REGS for arm64, which allows a traced
function's arguments (and some other registers) to be captured into a
struct pt_regs, allowing these to be inspected and/or modified. This is
a building block for live-patching, where a function's arguments may be
forwarded to another function. This is also necessary to enable ftrace
and in-kernel pointer authentication at the same time, as it allows the
LR value to be captured and adjusted prior to signing.
Using GCC's -fpatchable-function-entry=N option, we can have the
compiler insert a configurable number of NOPs between the function entry
point and the usual prologue. This also ensures functions are AAPCS
compliant (e.g. disabling inter-procedural register allocation).
For example, with -fpatchable-function-entry=2, GCC 8.1.0 compiles the
following:
| unsigned long bar(void);
|
| unsigned long foo(void)
| {
| return bar() + 1;
| }
... to:
| <foo>:
| nop
| nop
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl 0 <bar>
| add x0, x0, #0x1
| ldp x29, x30, [sp], #16
| ret
This patch builds the kernel with -fpatchable-function-entry=2,
prefixing each function with two NOPs. To trace a function, we replace
these NOPs with a sequence that saves the LR into a GPR, then calls an
ftrace entry assembly function which saves this and other relevant
registers:
| mov x9, x30
| bl <ftrace-entry>
Since patchable functions are AAPCS compliant (and the kernel does not
use x18 as a platform register), x9-x18 can be safely clobbered in the
patched sequence and the ftrace entry code.
There are now two ftrace entry functions, ftrace_regs_entry (which saves
all GPRs), and ftrace_entry (which saves the bare minimum). A PLT is
allocated for each within modules.
Signed-off-by: Torsten Duwe <duwe@suse.de>
[Mark: rework asm, comments, PLTs, initialization, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Torsten Duwe <duwe@suse.de>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Tested-by: Torsten Duwe <duwe@suse.de>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Julien Thierry <jthierry@redhat.com>
Cc: Will Deacon <will@kernel.org>
2019-02-08 16:10:19 +01:00
struct plt_entry * plt ;
2019-10-17 15:26:38 +01:00
2019-10-21 15:05:52 +01:00
if ( ! IS_ENABLED ( CONFIG_ARM64_MODULE_PLTS ) )
return - EINVAL ;
2017-06-06 17:00:22 +00:00
/*
* On kernels that support module PLTs , the offset between the
* branch instruction and its target may legally exceed the
* range of an ordinary relative ' bl ' opcode . In this case , we
* need to branch via a trampoline in the module .
*
* NOTE : __module_text_address ( ) must be called with preemption
* disabled , but we can rely on ftrace_lock to ensure that ' mod '
* retains its validity throughout the remainder of this code .
*/
preempt_disable ( ) ;
mod = __module_text_address ( pc ) ;
preempt_enable ( ) ;
if ( WARN_ON ( ! mod ) )
return - EINVAL ;
arm64: implement ftrace with regs
This patch implements FTRACE_WITH_REGS for arm64, which allows a traced
function's arguments (and some other registers) to be captured into a
struct pt_regs, allowing these to be inspected and/or modified. This is
a building block for live-patching, where a function's arguments may be
forwarded to another function. This is also necessary to enable ftrace
and in-kernel pointer authentication at the same time, as it allows the
LR value to be captured and adjusted prior to signing.
Using GCC's -fpatchable-function-entry=N option, we can have the
compiler insert a configurable number of NOPs between the function entry
point and the usual prologue. This also ensures functions are AAPCS
compliant (e.g. disabling inter-procedural register allocation).
For example, with -fpatchable-function-entry=2, GCC 8.1.0 compiles the
following:
| unsigned long bar(void);
|
| unsigned long foo(void)
| {
| return bar() + 1;
| }
... to:
| <foo>:
| nop
| nop
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl 0 <bar>
| add x0, x0, #0x1
| ldp x29, x30, [sp], #16
| ret
This patch builds the kernel with -fpatchable-function-entry=2,
prefixing each function with two NOPs. To trace a function, we replace
these NOPs with a sequence that saves the LR into a GPR, then calls an
ftrace entry assembly function which saves this and other relevant
registers:
| mov x9, x30
| bl <ftrace-entry>
Since patchable functions are AAPCS compliant (and the kernel does not
use x18 as a platform register), x9-x18 can be safely clobbered in the
patched sequence and the ftrace entry code.
There are now two ftrace entry functions, ftrace_regs_entry (which saves
all GPRs), and ftrace_entry (which saves the bare minimum). A PLT is
allocated for each within modules.
Signed-off-by: Torsten Duwe <duwe@suse.de>
[Mark: rework asm, comments, PLTs, initialization, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Torsten Duwe <duwe@suse.de>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Tested-by: Torsten Duwe <duwe@suse.de>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Julien Thierry <jthierry@redhat.com>
Cc: Will Deacon <will@kernel.org>
2019-02-08 16:10:19 +01:00
plt = get_ftrace_plt ( mod , addr ) ;
if ( ! plt ) {
pr_err ( " ftrace: no module PLT for %ps \n " , ( void * ) addr ) ;
return - EINVAL ;
}
addr = ( unsigned long ) plt ;
2017-06-23 18:02:06 +01:00
}
2017-06-06 17:00:22 +00:00
2014-04-30 10:54:34 +01:00
old = aarch64_insn_gen_nop ( ) ;
2014-09-19 12:05:45 +01:00
new = aarch64_insn_gen_branch_imm ( pc , addr , AARCH64_INSN_BRANCH_LINK ) ;
2014-04-30 10:54:34 +01:00
return ftrace_modify_code ( pc , old , new , true ) ;
}
arm64: implement ftrace with regs
This patch implements FTRACE_WITH_REGS for arm64, which allows a traced
function's arguments (and some other registers) to be captured into a
struct pt_regs, allowing these to be inspected and/or modified. This is
a building block for live-patching, where a function's arguments may be
forwarded to another function. This is also necessary to enable ftrace
and in-kernel pointer authentication at the same time, as it allows the
LR value to be captured and adjusted prior to signing.
Using GCC's -fpatchable-function-entry=N option, we can have the
compiler insert a configurable number of NOPs between the function entry
point and the usual prologue. This also ensures functions are AAPCS
compliant (e.g. disabling inter-procedural register allocation).
For example, with -fpatchable-function-entry=2, GCC 8.1.0 compiles the
following:
| unsigned long bar(void);
|
| unsigned long foo(void)
| {
| return bar() + 1;
| }
... to:
| <foo>:
| nop
| nop
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl 0 <bar>
| add x0, x0, #0x1
| ldp x29, x30, [sp], #16
| ret
This patch builds the kernel with -fpatchable-function-entry=2,
prefixing each function with two NOPs. To trace a function, we replace
these NOPs with a sequence that saves the LR into a GPR, then calls an
ftrace entry assembly function which saves this and other relevant
registers:
| mov x9, x30
| bl <ftrace-entry>
Since patchable functions are AAPCS compliant (and the kernel does not
use x18 as a platform register), x9-x18 can be safely clobbered in the
patched sequence and the ftrace entry code.
There are now two ftrace entry functions, ftrace_regs_entry (which saves
all GPRs), and ftrace_entry (which saves the bare minimum). A PLT is
allocated for each within modules.
Signed-off-by: Torsten Duwe <duwe@suse.de>
[Mark: rework asm, comments, PLTs, initialization, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Torsten Duwe <duwe@suse.de>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Tested-by: Torsten Duwe <duwe@suse.de>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Julien Thierry <jthierry@redhat.com>
Cc: Will Deacon <will@kernel.org>
2019-02-08 16:10:19 +01:00
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call ( struct dyn_ftrace * rec , unsigned long old_addr ,
unsigned long addr )
{
unsigned long pc = rec - > ip ;
u32 old , new ;
old = aarch64_insn_gen_branch_imm ( pc , old_addr ,
AARCH64_INSN_BRANCH_LINK ) ;
new = aarch64_insn_gen_branch_imm ( pc , addr , AARCH64_INSN_BRANCH_LINK ) ;
return ftrace_modify_code ( pc , old , new , true ) ;
}
/*
* The compiler has inserted two NOPs before the regular function prologue .
* All instrumented functions follow the AAPCS , so x0 - x8 and x19 - x30 are live ,
* and x9 - x18 are free for our use .
*
* At runtime we want to be able to swing a single NOP < - > BL to enable or
* disable the ftrace call . The BL requires us to save the original LR value ,
* so here we insert a < MOV X9 , LR > over the first NOP so the instructions
* before the regular prologue are :
*
* | Compiled | Disabled | Enabled |
* + - - - - - - - - - - + - - - - - - - - - - - - + - - - - - - - - - - - - +
* | NOP | MOV X9 , LR | MOV X9 , LR |
* | NOP | NOP | BL < entry > |
*
* The LR value will be recovered by ftrace_regs_entry , and restored into LR
* before returning to the regular function prologue . When a function is not
* being traced , the MOV is not harmful given x9 is not live per the AAPCS .
*
* Note : ftrace_process_locs ( ) has pre - adjusted rec - > ip to be the address of
* the BL .
*/
int ftrace_init_nop ( struct module * mod , struct dyn_ftrace * rec )
{
unsigned long pc = rec - > ip - AARCH64_INSN_SIZE ;
u32 old , new ;
old = aarch64_insn_gen_nop ( ) ;
new = aarch64_insn_gen_move_reg ( AARCH64_INSN_REG_9 ,
AARCH64_INSN_REG_LR ,
AARCH64_INSN_VARIANT_64BIT ) ;
return ftrace_modify_code ( pc , old , new , true ) ;
}
# endif
2014-04-30 10:54:34 +01:00
/*
* Turn off the call to ftrace_caller ( ) in instrumented function
*/
int ftrace_make_nop ( struct module * mod , struct dyn_ftrace * rec ,
unsigned long addr )
{
unsigned long pc = rec - > ip ;
2017-06-06 17:00:21 +00:00
bool validate = true ;
u32 old = 0 , new ;
2017-06-12 14:43:25 +01:00
long offset = ( long ) pc - ( long ) addr ;
if ( offset < - SZ_128M | | offset > = SZ_128M ) {
2017-06-06 17:00:21 +00:00
u32 replaced ;
2019-10-21 15:05:52 +01:00
if ( ! IS_ENABLED ( CONFIG_ARM64_MODULE_PLTS ) )
return - EINVAL ;
2017-06-06 17:00:21 +00:00
/*
* ' mod ' is only set at module load time , but if we end up
* dealing with an out - of - range condition , we can assume it
* is due to a module being loaded far away from the kernel .
*/
if ( ! mod ) {
preempt_disable ( ) ;
mod = __module_text_address ( pc ) ;
preempt_enable ( ) ;
if ( WARN_ON ( ! mod ) )
return - EINVAL ;
}
/*
* The instruction we are about to patch may be a branch and
* link instruction that was redirected via a PLT entry . In
* this case , the normal validation will fail , but we can at
* least check that we are dealing with a branch and link
* instruction that points into the right module .
*/
if ( aarch64_insn_read ( ( void * ) pc , & replaced ) )
return - EFAULT ;
if ( ! aarch64_insn_is_bl ( replaced ) | |
! within_module ( pc + aarch64_get_branch_offset ( replaced ) ,
mod ) )
return - EINVAL ;
validate = false ;
} else {
old = aarch64_insn_gen_branch_imm ( pc , addr ,
AARCH64_INSN_BRANCH_LINK ) ;
}
2014-04-30 10:54:34 +01:00
new = aarch64_insn_gen_nop ( ) ;
2017-06-06 17:00:21 +00:00
return ftrace_modify_code ( pc , old , new , validate ) ;
2014-04-30 10:54:34 +01:00
}
2015-12-04 11:38:39 +08:00
void arch_ftrace_update_code ( int command )
{
2018-12-05 12:48:54 -05:00
command | = FTRACE_MAY_SLEEP ;
2015-12-04 11:38:39 +08:00
ftrace_modify_all_code ( command ) ;
}
2014-04-30 10:54:34 +01:00
/* No arch-specific dynamic-ftrace initialisation is needed on arm64. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
# endif /* CONFIG_DYNAMIC_FTRACE */
2014-04-30 18:54:33 +09:00
# ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* function_graph tracer expects ftrace_return_to_handler ( ) to be called
* on the way back to parent . For this purpose , this function is called
* in _mcount ( ) or ftrace_caller ( ) to replace return address ( * parent ) on
* the call stack to return_to_handler .
*
* Note that @ frame_pointer is used only for sanity check later .
*/
2018-11-15 22:42:03 +00:00
void prepare_ftrace_return ( unsigned long self_addr , unsigned long * parent ,
2014-04-30 18:54:33 +09:00
unsigned long frame_pointer )
{
unsigned long return_hooker = ( unsigned long ) & return_to_handler ;
unsigned long old ;
if ( unlikely ( atomic_read ( & current - > tracing_graph_pause ) ) )
return ;
/*
* Note :
* No protection against faulting at * parent , which may be seen
* on other archs . It ' s unlikely on AArch64 .
*/
old = * parent ;
2018-11-18 17:21:51 -05:00
if ( ! function_graph_enter ( old , self_addr , frame_pointer , NULL ) )
2015-12-15 17:33:39 +09:00
* parent = return_hooker ;
2014-04-30 18:54:33 +09:00
}
2014-04-30 10:54:34 +01:00
# ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);

	return ftrace_modify_code(pc, branch, nop, true);
}
/* Patch in the branch to ftrace_graph_caller(). */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}
/* Patch the branch to ftrace_graph_caller() back to a NOP. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
# endif /* CONFIG_DYNAMIC_FTRACE */
2014-04-30 18:54:33 +09:00
# endif /* CONFIG_FUNCTION_GRAPH_TRACER */