perf/x86/lbr: Remove cpuc->lbr_xsave allocation from atomic context
[ Upstream commit 488e13a489e9707a7e81e1991fdd1f20c0f04689 ]

If the kernel is compiled with the CONFIG_LOCKDEP option, the conditional
might_sleep_if() deep in kmem_cache_alloc() will generate the following
trace, and potentially cause a deadlock when another LBR event is added:

  [] BUG: sleeping function called from invalid context at include/linux/sched/mm.h:196
  [] Call Trace:
  []  kmem_cache_alloc+0x36/0x250
  []  intel_pmu_lbr_add+0x152/0x170
  []  x86_pmu_add+0x83/0xd0

Make it symmetric with the release_lbr_buffers() call and mirror the
existing DS buffers.

Fixes: c085fb8774 ("perf/x86/intel/lbr: Support XSAVES for arch LBR read")
Signed-off-by: Like Xu <like.xu@linux.intel.com>
[peterz: simplified]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lkml.kernel.org/r/20210430052247.3079672-2-like.xu@linux.intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 56bc20e5fc
parent ca2acbd548
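Background on the splat, for reviewers: GFP_KERNEL allocations may block for
direct reclaim, while pmu::add runs in atomic context (under the perf event
context locks, with IRQs disabled), where blocking is forbidden. With lockdep
or atomic-sleep debugging enabled, the slab allocator asserts this on every
call. A minimal sketch of the check, paraphrased from include/linux/kernel.h
and include/linux/gfp.h, not part of this diff:

	/* Complain (once, with a backtrace) if sleeping is not allowed here. */
	#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)

	/* GFP_KERNEL includes __GFP_DIRECT_RECLAIM, so the slab entry points
	 * effectively do this on every allocation: */
	might_sleep_if(gfpflags_allow_blocking(flags));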
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
@@ -372,10 +372,12 @@ int x86_reserve_hardware(void)
 	if (!atomic_inc_not_zero(&pmc_refcount)) {
 		mutex_lock(&pmc_reserve_mutex);
 		if (atomic_read(&pmc_refcount) == 0) {
-			if (!reserve_pmc_hardware())
+			if (!reserve_pmc_hardware()) {
 				err = -EBUSY;
-			else
+			} else {
 				reserve_ds_buffers();
+				reserve_lbr_buffers();
+			}
 		}
 		if (!err)
 			atomic_inc(&pmc_refcount);
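The new call site is safe for GFP_KERNEL: x86_reserve_hardware() is reached
from event creation in process context and serializes on pmc_reserve_mutex.
It also restores symmetry with the teardown path, which already pairs
release_ds_buffers() with release_lbr_buffers(); roughly, paraphrased from
arch/x86/events/core.c of this era, not part of this diff:

	void x86_release_hardware(void)
	{
		if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
			release_pmc_hardware();
			release_ds_buffers();
			release_lbr_buffers();
			mutex_unlock(&pmc_reserve_mutex);
		}
	}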
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
 
 void intel_pmu_lbr_add(struct perf_event *event)
 {
-	struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu.lbr_nr)
@@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
 	perf_sched_cb_inc(event->ctx->pmu);
 	if (!cpuc->lbr_users++ && !event->total_time_running)
 		intel_pmu_lbr_reset();
-
-	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
-	    kmem_cache && !cpuc->lbr_xsave &&
-	    (cpuc->lbr_users != cpuc->lbr_pebs_users))
-		cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
 }
 
 void release_lbr_buffers(void)
@@ -721,6 +715,26 @@ void release_lbr_buffers(void)
 	}
 }
 
+void reserve_lbr_buffers(void)
+{
+	struct kmem_cache *kmem_cache;
+	struct cpu_hw_events *cpuc;
+	int cpu;
+
+	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+		if (!kmem_cache || cpuc->lbr_xsave)
+			continue;
+
+		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+							cpu_to_node(cpu));
+	}
+}
+
 void intel_pmu_lbr_del(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
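For comparison with the new reserve_lbr_buffers(), the pre-existing release
side walks the same per-CPU state and frees lbr_xsave; roughly, paraphrased
from arch/x86/events/intel/lbr.c of this era, not part of this diff:

	void release_lbr_buffers(void)
	{
		struct kmem_cache *kmem_cache;
		struct cpu_hw_events *cpuc;
		int cpu;

		if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
			return;

		for_each_possible_cpu(cpu) {
			cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
			kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
			if (kmem_cache && cpuc->lbr_xsave) {
				kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
				cpuc->lbr_xsave = NULL;
			}
		}
	}

Allocating with kmem_cache_alloc_node(..., cpu_to_node(cpu)) keeps each CPU's
XSAVE buffer on its local NUMA node, which a plain kmem_cache_alloc() from the
context-switch path could not guarantee.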
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
@@ -1122,6 +1122,8 @@ void reserve_ds_buffers(void);
 
 void release_lbr_buffers(void);
 
+void reserve_lbr_buffers(void);
+
 extern struct event_constraint bts_constraint;
 extern struct event_constraint vlbr_constraint;
 
@@ -1267,6 +1269,10 @@ static inline void release_lbr_buffers(void)
 {
 }
 
+static inline void reserve_lbr_buffers(void)
+{
+}
+
 static inline int intel_pmu_init(void)
 {
 	return 0;
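The two header hunks follow the usual pattern in arch/x86/events/perf_event.h:
the real prototype lives in the CONFIG_CPU_SUP_INTEL section and an empty
inline stub covers non-Intel builds, so the unconditional call in
x86_reserve_hardware() compiles either way. Structure sketched from the
header, abbreviated:

	#ifdef CONFIG_CPU_SUP_INTEL
	...
	void reserve_lbr_buffers(void);
	...
	#else /* !CONFIG_CPU_SUP_INTEL */
	...
	static inline void reserve_lbr_buffers(void)
	{
	}
	...
	#endif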