riscv: Fix text patching when IPI are used

For now, we use stop_machine() to patch the text, and when IPIs are used
for remote icache flushes (which are emitted in patch_text_nosync()), the
system hangs.

So instead, make sure every CPU executes the stop_machine() patching
function and emit a local icache flush there.

Co-developed-by: Björn Töpel <bjorn@rivosinc.com>
Signed-off-by: Björn Töpel <bjorn@rivosinc.com>
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Andrea Parri <parri.andrea@gmail.com>
Link: https://lore.kernel.org/r/20240229121056.203419-3-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 29cee75fb6
commit c97bf62996
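In short, the fix moves the icache maintenance into the stop_machine() callback itself. A condensed sketch of the rendezvous (identifiers follow the ftrace hunk below; this is an illustration, not the literal patch text):

	/* Runs on every online CPU via stop_machine(). */
	static int patching_rendezvous(void *data)
	{
		struct ftrace_modify_param *param = data;

		if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
			/* Last CPU to arrive performs the actual text patching. */
			ftrace_modify_all_code(param->command);
			/* Release: patched text is visible before waiters leave. */
			atomic_inc_return_release(&param->cpu_count);
		} else {
			/* All other CPUs spin until the patcher's extra increment. */
			while (atomic_read(&param->cpu_count) <= num_online_cpus())
				cpu_relax();
		}

		/* Every hart flushes its own icache; no remote-flush IPIs needed. */
		local_flush_icache_all();
		return 0;
	}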
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -6,6 +6,7 @@
 #ifndef _ASM_RISCV_PATCH_H
 #define _ASM_RISCV_PATCH_H
 
+int patch_insn_write(void *addr, const void *insn, size_t len);
 int patch_text_nosync(void *addr, const void *insns, size_t len);
 int patch_text_set_nosync(void *addr, u8 c, size_t len);
 int patch_text(void *addr, u32 *insns, int ninsns);
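The header now exports patch_insn_write(), which writes instructions without any icache synchronization. Callers that previously went through patch_text_nosync() (write plus flush, possibly via remote IPIs) switch to the raw write and rely on the per-CPU flush inside the stop_machine() callback. Roughly (illustrative comparison, not part of the patch):

	/* before: write, then flush icache, possibly emitting remote IPIs */
	patch_text_nosync(addr, insn, len);

	/* after (ftrace path): plain write; each CPU later runs
	 * local_flush_icache_all() inside the stop_machine() callback */
	patch_insn_write(addr, insn, len);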
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -8,6 +8,7 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/memory.h>
+#include <linux/stop_machine.h>
 #include <asm/cacheflush.h>
 #include <asm/patch.h>
 
@@ -75,8 +76,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
 	make_call_t0(hook_pos, target, call);
 
 	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
-	if (patch_text_nosync
-	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
+	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
 	return 0;
@@ -88,7 +88,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	make_call_t0(rec->ip, addr, call);
 
-	if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
+	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
 	return 0;
@@ -99,7 +99,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 {
 	unsigned int nops[2] = {NOP4, NOP4};
 
-	if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
+	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
 	return 0;
@@ -134,6 +134,42 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 	return ret;
 }
+
+struct ftrace_modify_param {
+	int command;
+	atomic_t cpu_count;
+};
+
+static int __ftrace_modify_code(void *data)
+{
+	struct ftrace_modify_param *param = data;
+
+	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+		ftrace_modify_all_code(param->command);
+		/*
+		 * Make sure the patching store is effective *before* we
+		 * increment the counter which releases all waiting CPUs
+		 * by using the release variant of atomic increment. The
+		 * release pairs with the call to local_flush_icache_all()
+		 * on the waiting CPU.
+		 */
+		atomic_inc_return_release(&param->cpu_count);
+	} else {
+		while (atomic_read(&param->cpu_count) <= num_online_cpus())
+			cpu_relax();
+	}
+
+	local_flush_icache_all();
+
+	return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
+
+	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+}
+
 #endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
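Why the waiters spin on cpu_count <= num_online_cpus(): the counter counts arrivals 1..N, and only the patcher's extra release-increment pushes it past N. A hypothetical walkthrough for N = 4:

	/*
	 * num_online_cpus() == 4 (hypothetical):
	 *
	 *   CPUs A, B, C arrive: atomic_inc_return() yields 1, 2, 3 -> they spin
	 *   CPU D arrives:       atomic_inc_return() yields 4 -> D is the patcher
	 *   D patches the text, then release-increments cpu_count to 5
	 *   A, B, C observe 5 > 4 and stop spinning
	 *   all four CPUs run local_flush_icache_all() on their own hart
	 */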
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -188,7 +188,7 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
 }
 NOKPROBE_SYMBOL(patch_text_set_nosync);
 
-static int patch_insn_write(void *addr, const void *insn, size_t len)
+int patch_insn_write(void *addr, const void *insn, size_t len)
 {
 	size_t patched = 0;
 	size_t size;
@@ -232,15 +232,23 @@ static int patch_text_cb(void *data)
 
 	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
 		for (i = 0; ret == 0 && i < patch->ninsns; i++) {
 			len = GET_INSN_LENGTH(patch->insns[i]);
-			ret = patch_text_nosync(patch->addr + i * len,
-						&patch->insns[i], len);
+			ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len);
 		}
-		atomic_inc(&patch->cpu_count);
+		/*
+		 * Make sure the patching store is effective *before* we
+		 * increment the counter which releases all waiting CPUs
+		 * by using the release variant of atomic increment. The
+		 * release pairs with the call to local_flush_icache_all()
+		 * on the waiting CPU.
+		 */
+		atomic_inc_return_release(&patch->cpu_count);
 	} else {
 		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
 			cpu_relax();
 	}
+
+	local_flush_icache_all();
+
 	return ret;
 }
 NOKPROBE_SYMBOL(patch_text_cb);
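With the flush folded into patch_text_cb(), a patch_text() caller (e.g. kprobes) no longer triggers remote icache-flush IPIs from inside stop_machine(). An illustrative call, with a made-up address variable (0x00000013 encodes the RISC-V nop, addi x0, x0, 0):

	u32 insn = 0x00000013;	/* nop */

	patch_text(addr, &insn, 1);	/* 'addr' is hypothetical */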