Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Two leftover fixes from the v3.17 cycle - these will be forwarded to
  stable as well, if they prove problem-free in wider testing as well"

[ Side note: the "fix perf bug in fork()" fix had also come in through
  Andrew's patch-bomb  - Linus ]

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Fix perf bug in fork()
  perf: Fix unclone_ctx() vs. locking
commit ebf546cc53
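The substantive change in the diff below is the calling convention of unclone_ctx(): it may no longer call put_ctx() while ctx->lock is held, because put_ctx() can end up taking scheduler-related locks and ctx->lock nests inside those, so dropping the reference under the lock would invert the lock order. unclone_ctx() therefore returns the old parent context, and each caller drops that reference only after releasing ctx->lock. What follows is a minimal userspace sketch of that pattern, not the kernel code: the struct layout, the pthread mutex standing in for ctx->lock, and the example_caller()/main() harness are illustrative assumptions; only the unclone_ctx()/put_ctx() shape mirrors the kernel change.

/*
 * Sketch of the "return the clone parent, put it after unlocking" pattern.
 * Build: cc sketch.c -pthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	pthread_mutex_t lock;		/* stands in for perf_event_context::lock */
	struct ctx *parent_ctx;		/* set when this context is a clone */
	int refcount;			/* not atomic; single-threaded sketch only */
	int generation;
};

/* Dropping a reference may take other locks, so never call this under ctx->lock. */
static void put_ctx(struct ctx *ctx)
{
	if (--ctx->refcount == 0)
		free(ctx);
}

/*
 * Detach the clone parent while ctx->lock is held, but defer the put_ctx()
 * to the caller: the reference is dropped only after the lock is released.
 */
static struct ctx *unclone_ctx(struct ctx *ctx)	/* caller must hold ctx->lock */
{
	struct ctx *parent_ctx = ctx->parent_ctx;

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static void example_caller(struct ctx *ctx)
{
	struct ctx *clone_ctx;

	pthread_mutex_lock(&ctx->lock);
	clone_ctx = unclone_ctx(ctx);	/* no put_ctx() under the lock */
	pthread_mutex_unlock(&ctx->lock);

	if (clone_ctx)
		put_ctx(clone_ctx);	/* safe: ctx->lock is no longer held */
}

int main(void)
{
	struct ctx *parent = calloc(1, sizeof(*parent));
	struct ctx *child = calloc(1, sizeof(*child));

	parent->refcount = 1;
	child->refcount = 1;
	child->parent_ctx = parent;
	pthread_mutex_init(&child->lock, NULL);

	example_caller(child);		/* drops the parent reference safely */
	printf("generation=%d\n", child->generation);

	put_ctx(child);
	return 0;
}

In the kernel patch the same discipline is reinforced by marking unclone_ctx() __must_check, so a caller that ignores the returned context gets a compiler warning.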
@@ -906,13 +906,23 @@ static void put_ctx(struct perf_event_context *ctx)
 	}
 }
 
-static void unclone_ctx(struct perf_event_context *ctx)
+/*
+ * This must be done under the ctx->lock, such as to serialize against
+ * context_equiv(), therefore we cannot call put_ctx() since that might end up
+ * calling scheduler related locks and ctx->lock nests inside those.
+ */
+static __must_check struct perf_event_context *
+unclone_ctx(struct perf_event_context *ctx)
 {
-	if (ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
+	struct perf_event_context *parent_ctx = ctx->parent_ctx;
+
+	lockdep_assert_held(&ctx->lock);
+
+	if (parent_ctx)
 		ctx->parent_ctx = NULL;
-	}
 	ctx->generation++;
+
+	return parent_ctx;
 }
 
 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
@@ -2259,6 +2269,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 static int context_equiv(struct perf_event_context *ctx1,
 			 struct perf_event_context *ctx2)
 {
+	lockdep_assert_held(&ctx1->lock);
+	lockdep_assert_held(&ctx2->lock);
+
 	/* Pinning disables the swap optimization */
 	if (ctx1->pin_count || ctx2->pin_count)
 		return 0;
@@ -2992,6 +3005,7 @@ static int event_enable_on_exec(struct perf_event *event,
  */
 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 {
+	struct perf_event_context *clone_ctx = NULL;
 	struct perf_event *event;
 	unsigned long flags;
 	int enabled = 0;
@@ -3023,7 +3037,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * Unclone this context if we enabled any event.
 	 */
 	if (enabled)
-		unclone_ctx(ctx);
+		clone_ctx = unclone_ctx(ctx);
 
 	raw_spin_unlock(&ctx->lock);
 
@@ -3033,6 +3047,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
+
+	if (clone_ctx)
+		put_ctx(clone_ctx);
 }
 
 void perf_event_exec(void)
@@ -3185,7 +3202,7 @@ errout:
 static struct perf_event_context *
 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 {
-	struct perf_event_context *ctx;
+	struct perf_event_context *ctx, *clone_ctx = NULL;
 	struct perf_cpu_context *cpuctx;
 	unsigned long flags;
 	int ctxn, err;
@@ -3219,9 +3236,12 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 retry:
 	ctx = perf_lock_task_context(task, ctxn, &flags);
 	if (ctx) {
-		unclone_ctx(ctx);
+		clone_ctx = unclone_ctx(ctx);
 		++ctx->pin_count;
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+		if (clone_ctx)
+			put_ctx(clone_ctx);
 	} else {
 		ctx = alloc_perf_context(pmu, task);
 		err = -ENOMEM;
@@ -7646,7 +7666,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
 	struct perf_event *child_event, *next;
-	struct perf_event_context *child_ctx, *parent_ctx;
+	struct perf_event_context *child_ctx, *clone_ctx = NULL;
 	unsigned long flags;
 
 	if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7672,29 +7692,17 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
 
-	/*
-	 * In order to avoid freeing: child_ctx->parent_ctx->task
-	 * under perf_event_context::lock, grab another reference.
-	 */
-	parent_ctx = child_ctx->parent_ctx;
-	if (parent_ctx)
-		get_ctx(parent_ctx);
-
 	/*
 	 * If this context is a clone; unclone it so it can't get
 	 * swapped to another process while we're removing all
 	 * the events from it.
 	 */
-	unclone_ctx(child_ctx);
+	clone_ctx = unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
-	/*
-	 * Now that we no longer hold perf_event_context::lock, drop
-	 * our extra child_ctx->parent_ctx reference.
-	 */
-	if (parent_ctx)
-		put_ctx(parent_ctx);
+	if (clone_ctx)
+		put_ctx(clone_ctx);
 
 	/*
 	 * Report the task dead after unscheduling the events so that we