powerpc/watchpoints: Disable preemption in thread_change_pc()
thread_change_pc() uses CPU-local data, so it must be protected from
swapping CPUs while it is reading the breakpoint struct.
The error is more noticeable after commit 1e60f3564b ("powerpc/watchpoints:
Track perf single step directly on the breakpoint"), which added an
unconditional __this_cpu_read() call in thread_change_pc(). However, the
existing __this_cpu_read() that runs if a breakpoint does need to be
re-inserted has the same issue.
Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230829063457.54157-2-bgray@linux.ibm.com
commit cc879ab3ce
parent 4ff3ba4db5
@@ -230,13 +230,15 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	int i;
 
+	preempt_disable();
+
 	for (i = 0; i < nr_wp_slots(); i++) {
 		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);
 
 		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
 			goto reset;
 	}
-	return;
+	goto out;
 
 reset:
 	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
@@ -245,6 +247,9 @@ reset:
 		__set_breakpoint(i, info);
 		info->perf_single_step = false;
 	}
+
+out:
+	preempt_enable();
 }
 
 static bool is_larx_stcx_instr(int type)
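The race the patch closes: without pinning, the task could migrate to
another CPU between the __this_cpu_read() of a bp_per_reg slot and the
later __set_breakpoint() call, so it could end up reading or re-arming a
different CPU's breakpoint state. Below is a minimal illustrative sketch
of the pattern, not the kernel's actual code; example_bp_slot and
example_check_slot are hypothetical names standing in for the real
bp_per_reg array and thread_change_pc():

    /*
     * Sketch: a per-CPU slot may only be read and acted on while
     * preemption is disabled, otherwise the task can migrate and
     * end up touching another CPU's state.
     */
    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/perf_event.h>

    static DEFINE_PER_CPU(struct perf_event *, example_bp_slot);

    static void example_check_slot(void)
    {
            struct perf_event *bp;

            preempt_disable();                     /* pin to the current CPU */
            bp = __this_cpu_read(example_bp_slot);
            if (bp) {
                    /* follow-up work still targets this CPU's slot */
            }
            preempt_enable();
    }

In a pattern like this, disabling preemption is sufficient because the
per-CPU slot is only ever touched from its owning CPU, so no heavier
locking is needed.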