Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 5006921837

@@ -29,6 +29,8 @@
 
 #define COUNTER_SHIFT 16
 
+static HLIST_HEAD(uncore_unused_list);
+
 struct amd_uncore {
         int id;
         int refcnt;
@@ -39,7 +41,7 @@ struct amd_uncore {
         cpumask_t *active_mask;
         struct pmu *pmu;
         struct perf_event *events[MAX_COUNTERS];
-        struct amd_uncore *free_when_cpu_online;
+        struct hlist_node node;
 };
 
 static struct amd_uncore * __percpu *amd_uncore_nb;
@@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                 uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                 uncore_nb->active_mask = &amd_nb_active_mask;
                 uncore_nb->pmu = &amd_nb_pmu;
+                uncore_nb->id = -1;
                 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
         }
 
@@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                 uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
                 uncore_l2->active_mask = &amd_l2_active_mask;
                 uncore_l2->pmu = &amd_l2_pmu;
+                uncore_l2->id = -1;
                 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
         }
 
@@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
                         continue;
 
                 if (this->id == that->id) {
-                        that->free_when_cpu_online = this;
+                        hlist_add_head(&this->node, &uncore_unused_list);
                         this = that;
                         break;
                 }
@@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
         return 0;
 }
 
+static void uncore_clean_online(void)
+{
+        struct amd_uncore *uncore;
+        struct hlist_node *n;
+
+        hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
+                hlist_del(&uncore->node);
+                kfree(uncore);
+        }
+}
+
 static void uncore_online(unsigned int cpu,
                           struct amd_uncore * __percpu *uncores)
 {
         struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
-        kfree(uncore->free_when_cpu_online);
-        uncore->free_when_cpu_online = NULL;
+        uncore_clean_online();
 
         if (cpu == uncore->cpu)
                 cpumask_set_cpu(cpu, uncore->active_mask);
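
The hunks above replace the per-object free_when_cpu_online back pointer in the AMD uncore code with a global list of superseded entries (uncore_unused_list) that is drained later, once the CPU is fully online. Below is a minimal stand-alone sketch of that deferred-free pattern; it uses a plain singly linked list and userspace malloc/free as stand-ins for the kernel's hlist and kfree(), and all names are illustrative, not the driver's.

/*
 * Stand-alone sketch (userspace C, not the kernel code): objects that lose
 * the "who owns this id" race are parked on an unused list and reaped later
 * in one place, instead of each object carrying a free-me-later back pointer.
 */
#include <stdio.h>
#include <stdlib.h>

struct uncore {
        int id;
        struct uncore *next;            /* stand-in for the kernel's hlist_node */
};

static struct uncore *unused_list;      /* stand-in for HLIST_HEAD(...) */

/* Park a superseded object; nothing is freed in this (atomic) context. */
static void park_unused(struct uncore *u)
{
        u->next = unused_list;
        unused_list = u;
}

/* Reap everything that was parked, from a context where freeing is safe. */
static void clean_unused(void)
{
        while (unused_list) {
                struct uncore *u = unused_list;

                unused_list = u->next;
                free(u);
        }
}

int main(void)
{
        struct uncore *a = calloc(1, sizeof(*a));
        struct uncore *b = calloc(1, sizeof(*b));

        a->id = b->id = 3;      /* same id: one of them is redundant */
        park_unused(a);         /* would happen in the "starting" callback */
        clean_unused();         /* would happen in the "online" callback */
        free(b);
        printf("done\n");
        return 0;
}

The point of the indirection is that the callback which discovers the duplicate may not be a safe place to free memory, so the object is only parked there and reclaimed from the later callback.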
@@ -31,7 +31,17 @@
 struct bts_ctx {
         struct perf_output_handle handle;
         struct debug_store ds_back;
-        int started;
+        int state;
+};
+
+/* BTS context states: */
+enum {
+        /* no ongoing AUX transactions */
+        BTS_STATE_STOPPED = 0,
+        /* AUX transaction is on, BTS tracing is disabled */
+        BTS_STATE_INACTIVE,
+        /* AUX transaction is on, BTS tracing is running */
+        BTS_STATE_ACTIVE,
 };
 
 static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)
 static int
 bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
 
+/*
+ * Ordering PMU callbacks wrt themselves and the PMI is done by means
+ * of bts::state, which:
+ *  - is set when bts::handle::event is valid, that is, between
+ *    perf_aux_output_begin() and perf_aux_output_end();
+ *  - is zero otherwise;
+ *  - is ordered against bts::handle::event with a compiler barrier.
+ */
+
 static void __bts_event_start(struct perf_event *event)
 {
         struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
@@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
 
         /*
          * local barrier to make sure that ds configuration made it
-         * before we enable BTS
+         * before we enable BTS and bts::state goes ACTIVE
          */
         wmb();
 
+        /* INACTIVE/STOPPED -> ACTIVE */
+        WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
+
         intel_pmu_enable_bts(config);
 
 }
@@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
 
         __bts_event_start(event);
 
-        /* PMI handler: this counter is running and likely generating PMIs */
-        ACCESS_ONCE(bts->started) = 1;
-
         return;
 
 fail_end_stop:
@@ -263,30 +282,34 @@ fail_stop:
         event->hw.state = PERF_HES_STOPPED;
 }
 
-static void __bts_event_stop(struct perf_event *event)
+static void __bts_event_stop(struct perf_event *event, int state)
 {
+        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+        /* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
+        WRITE_ONCE(bts->state, state);
+
         /*
          * No extra synchronization is mandated by the documentation to have
          * BTS data stores globally visible.
          */
         intel_pmu_disable_bts();
-
-        if (event->hw.state & PERF_HES_STOPPED)
-                return;
-
-        ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
 }
 
 static void bts_event_stop(struct perf_event *event, int flags)
 {
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
-        struct bts_buffer *buf = perf_get_aux(&bts->handle);
+        struct bts_buffer *buf = NULL;
+        int state = READ_ONCE(bts->state);
+
+        if (state == BTS_STATE_ACTIVE)
+                __bts_event_stop(event, BTS_STATE_STOPPED);
 
-        /* PMI handler: don't restart this counter */
-        ACCESS_ONCE(bts->started) = 0;
+        if (state != BTS_STATE_STOPPED)
+                buf = perf_get_aux(&bts->handle);
 
-        __bts_event_stop(event);
+        event->hw.state |= PERF_HES_STOPPED;
 
         if (flags & PERF_EF_UPDATE) {
                 bts_update(bts);
@@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
                 bts->handle.head =
                         local_xchg(&buf->data_size,
                                    buf->nr_pages << PAGE_SHIFT);
+
                 perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                                     !!local_xchg(&buf->lost, 0));
         }
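
The BTS hunks above retire the boolean bts->started in favour of a three-state machine (STOPPED / INACTIVE / ACTIVE) written with WRITE_ONCE() and read with READ_ONCE(), so the PMU callbacks and the PMI handler agree on what the hardware is doing. A rough userspace model of that handshake, using C11 atomics in place of the kernel accessors, is sketched below; it illustrates the idea, not the driver code.

/*
 * Minimal userspace model of the three-state handshake: publish the state
 * only after the hardware would have been configured, downgrade it before
 * touching the buffer.  atomic_store/atomic_load stand in for
 * WRITE_ONCE()/READ_ONCE().
 */
#include <stdatomic.h>
#include <stdio.h>

enum { STATE_STOPPED = 0, STATE_INACTIVE, STATE_ACTIVE };

static _Atomic int state = STATE_STOPPED;

static void event_start(void)
{
        /* configure the hardware here, then publish the state last */
        atomic_store(&state, STATE_ACTIVE);
}

static void event_stop(int new_state)
{
        /* ACTIVE -> INACTIVE (from the PMI) or STOPPED (from ->stop()) */
        atomic_store(&state, new_state);
}

/* PMI path: only touch the AUX buffer if tracing was not fully stopped. */
static int pmi(void)
{
        if (atomic_load(&state) == STATE_STOPPED)
                return 0;       /* not ours / nothing to do */

        event_stop(STATE_INACTIVE);
        /* ... drain the buffer, re-arm ... */
        event_start();
        return 1;
}

int main(void)
{
        event_start();
        printf("pmi handled: %d\n", pmi());     /* 1 */
        event_stop(STATE_STOPPED);
        printf("pmi handled: %d\n", pmi());     /* 0 */
        return 0;
}

Publishing the state only after configuration (and downgrading it before touching the buffer) is what lets the interrupt handler decide safely whether there is any work for it at all.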
@@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
 void intel_bts_enable_local(void)
 {
         struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+        int state = READ_ONCE(bts->state);
+
+        /*
+         * Here we transition from INACTIVE to ACTIVE;
+         * if we instead are STOPPED from the interrupt handler,
+         * stay that way. Can't be ACTIVE here though.
+         */
+        if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
+                return;
+
+        if (state == BTS_STATE_STOPPED)
+                return;
 
-        if (bts->handle.event && bts->started)
+        if (bts->handle.event)
                 __bts_event_start(bts->handle.event);
 }
 
@@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
 {
         struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
 
+        /*
+         * Here we transition from ACTIVE to INACTIVE;
+         * do nothing for STOPPED or INACTIVE.
+         */
+        if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
+                return;
+
         if (bts->handle.event)
-                __bts_event_stop(bts->handle.event);
+                __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
 }
 
 static int
@@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
                 return 0;
 
         head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
-        if (WARN_ON_ONCE(head != local_read(&buf->head)))
-                return -EINVAL;
 
         phys = &buf->buf[buf->cur_buf];
         space = phys->offset + phys->displacement + phys->size - head;
@@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
 
 int intel_bts_interrupt(void)
 {
+        struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
         struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
         struct perf_event *event = bts->handle.event;
         struct bts_buffer *buf;
         s64 old_head;
-        int err;
+        int err = -ENOSPC, handled = 0;
 
-        if (!event || !bts->started)
-                return 0;
+        /*
+         * The only surefire way of knowing if this NMI is ours is by checking
+         * the write ptr against the PMI threshold.
+         */
+        if (ds->bts_index >= ds->bts_interrupt_threshold)
+                handled = 1;
+
+        /*
+         * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
+         * so we can only be INACTIVE or STOPPED
+         */
+        if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
+                return handled;
 
         buf = perf_get_aux(&bts->handle);
+        if (!buf)
+                return handled;
+
         /*
          * Skip snapshot counters: they don't use the interrupt, but
          * there's no other way of telling, because the pointer will
          * keep moving
          */
-        if (!buf || buf->snapshot)
+        if (buf->snapshot)
                 return 0;
 
         old_head = local_read(&buf->head);
@@ -426,18 +482,27 @@ int intel_bts_interrupt(void)
 
         /* no new data */
         if (old_head == local_read(&buf->head))
-                return 0;
+                return handled;
 
         perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                             !!local_xchg(&buf->lost, 0));
 
         buf = perf_aux_output_begin(&bts->handle, event);
-        if (!buf)
-                return 1;
+        if (buf)
+                err = bts_buffer_reset(buf, &bts->handle);
+
+        if (err) {
+                WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
 
-        err = bts_buffer_reset(buf, &bts->handle);
-        if (err)
-                perf_aux_output_end(&bts->handle, 0, false);
+                if (buf) {
+                        /*
+                         * BTS_STATE_STOPPED should be visible before
+                         * cleared handle::event
+                         */
+                        barrier();
+                        perf_aux_output_end(&bts->handle, 0, false);
+                }
+        }
 
         return 1;
 }
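
intel_bts_interrupt() above now decides whether the NMI belongs to BTS by comparing the debug-store write index against the programmed interrupt threshold, and it returns that verdict as handled even when tracing is stopped. A small self-contained sketch of that test follows; the structure layout and field names only loosely mirror the real debug store and are assumptions made for the example.

/*
 * Sketch of the "is this NMI ours?" test: the only reliable indicator is
 * whether the hardware write pointer has crossed the programmed PMI
 * threshold.  Plain userspace C, illustrative names.
 */
#include <stdio.h>

struct debug_store_like {
        unsigned long bts_index;                /* current write pointer */
        unsigned long bts_interrupt_threshold;  /* PMI trigger point */
};

static int bts_interrupt(const struct debug_store_like *ds, int stopped)
{
        int handled = 0;

        if (ds->bts_index >= ds->bts_interrupt_threshold)
                handled = 1;    /* the hardware really did raise this PMI */

        if (stopped)
                return handled; /* tracing is off: claim it, but do no work */

        /* ... drain the buffer, re-arm the handle ... */
        return handled;
}

int main(void)
{
        struct debug_store_like ds = {
                .bts_index = 128,
                .bts_interrupt_threshold = 96,
        };

        printf("%d\n", bts_interrupt(&ds, 0)); /* 1: ours */
        ds.bts_index = 10;
        printf("%d\n", bts_interrupt(&ds, 0)); /* 0: someone else's NMI */
        return 0;
}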
@@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
 static void init_mbm_sample(u32 rmid, u32 evt_type);
 static void __intel_mbm_event_count(void *info);
 
+static bool is_cqm_event(int e)
+{
+        return (e == QOS_L3_OCCUP_EVENT_ID);
+}
+
 static bool is_mbm_event(int e)
 {
         return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
             (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
                 return -EINVAL;
 
+        if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
+            (is_mbm_event(event->attr.config) && !mbm_enabled))
+                return -EINVAL;
+
         /* unsupported modes and filters */
         if (event->attr.exclude_user ||
             event->attr.exclude_kernel ||
@@ -1312,18 +1312,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                 struct pebs_record_nhm *p = at;
                 u64 pebs_status;
 
-                /* PEBS v3 has accurate status bits */
+                pebs_status = p->status & cpuc->pebs_enabled;
+                pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+
+                /* PEBS v3 has more accurate status bits */
                 if (x86_pmu.intel_cap.pebs_format >= 3) {
-                        for_each_set_bit(bit, (unsigned long *)&p->status,
-                                         MAX_PEBS_EVENTS)
+                        for_each_set_bit(bit, (unsigned long *)&pebs_status,
+                                         x86_pmu.max_pebs_events)
                                 counts[bit]++;
 
                         continue;
                 }
 
-                pebs_status = p->status & cpuc->pebs_enabled;
-                pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
-
                 /*
                  * On some CPUs the PEBS status can be zero when PEBS is
                  * racing with clearing of GLOBAL_STATUS.
@@ -1371,8 +1371,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                         continue;
 
                 event = cpuc->events[bit];
-                WARN_ON_ONCE(!event);
-                WARN_ON_ONCE(!event->attr.precise_ip);
+                if (WARN_ON_ONCE(!event))
+                        continue;
+
+                if (WARN_ON_ONCE(!event->attr.precise_ip))
+                        continue;
 
                 /* log dropped samples number */
                 if (error[bit])
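
The PEBS drain hunk above hoists the status masking so that, even with the more accurate PEBS v3 status bits, the record's status is first ANDed with the counters that actually have PEBS enabled and clipped to the number of PEBS-capable counters before the set bits are counted. A stand-alone sketch of that masking step is below; the fixed 8-counter limit is an assumption for the example, standing in for the per-CPU maximum in the diff.

/*
 * Sketch of the status-masking idea: ignore status bits for counters that
 * do not have PEBS enabled, and bits beyond the number of PEBS-capable
 * counters, before counting anything.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PEBS_EVENTS 8       /* illustrative, not the real per-CPU value */

static void count_events(uint64_t record_status, uint64_t pebs_enabled,
                         unsigned int counts[MAX_PEBS_EVENTS])
{
        uint64_t status = record_status & pebs_enabled;

        status &= (1ULL << MAX_PEBS_EVENTS) - 1;        /* drop impossible bits */

        for (int bit = 0; bit < MAX_PEBS_EVENTS; bit++)
                if (status & (1ULL << bit))
                        counts[bit]++;
}

int main(void)
{
        unsigned int counts[MAX_PEBS_EVENTS] = { 0 };

        /* bit 9 is bogus and bit 1 is not PEBS-enabled: both must be ignored */
        count_events((1ULL << 9) | 0x3, 0x1, counts);
        printf("counter0=%u counter1=%u\n", counts[0], counts[1]);
        return 0;
}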
@@ -2497,11 +2497,11 @@ static int __perf_event_stop(void *info)
         return 0;
 }
 
-static int perf_event_restart(struct perf_event *event)
+static int perf_event_stop(struct perf_event *event, int restart)
 {
         struct stop_event_data sd = {
                 .event = event,
-                .restart = 1,
+                .restart = restart,
         };
         int ret = 0;
 
@@ -4874,6 +4874,19 @@ static void ring_buffer_attach(struct perf_event *event,
                 spin_unlock_irqrestore(&rb->event_lock, flags);
         }
 
+        /*
+         * Avoid racing with perf_mmap_close(AUX): stop the event
+         * before swizzling the event::rb pointer; if it's getting
+         * unmapped, its aux_mmap_count will be 0 and it won't
+         * restart. See the comment in __perf_pmu_output_stop().
+         *
+         * Data will inevitably be lost when set_output is done in
+         * mid-air, but then again, whoever does it like this is
+         * not in for the data anyway.
+         */
+        if (has_aux(event))
+                perf_event_stop(event, 0);
+
         rcu_assign_pointer(event->rb, rb);
 
         if (old_rb) {
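
The core hunks above fold perf_event_restart() into perf_event_stop(event, restart), so a caller such as ring_buffer_attach() can stop an AUX event without kicking it back into a running state. The toy helper below only illustrates that calling convention; the real function goes through a cross-CPU call and the stop_event_data structure shown in the diff, and the names here are purely illustrative.

/*
 * Toy model of the stop-with-optional-restart calling convention: one
 * helper, and the caller decides whether the event comes back up.
 */
#include <stdio.h>

struct stop_data {
        const char *name;
        int restart;
};

static int do_stop(const struct stop_data *sd)
{
        printf("stopping %s\n", sd->name);
        if (sd->restart)
                printf("restarting %s\n", sd->name);
        return 0;
}

static int event_stop(const char *name, int restart)
{
        struct stop_data sd = { .name = name, .restart = restart };

        return do_stop(&sd);
}

int main(void)
{
        event_stop("addr-filter-update", 1);    /* old restart-style behaviour */
        event_stop("rb-detach", 0);             /* new: stop without restarting */
        return 0;
}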
@@ -6150,7 +6163,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
         raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
         if (restart)
-                perf_event_restart(event);
+                perf_event_stop(event, 1);
 }
 
 void perf_event_exec(void)
@@ -6194,7 +6207,13 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 
         /*
          * In case of inheritance, it will be the parent that links to the
-         * ring-buffer, but it will be the child that's actually using it:
+         * ring-buffer, but it will be the child that's actually using it.
+         *
+         * We are using event::rb to determine if the event should be stopped,
+         * however this may race with ring_buffer_attach() (through set_output),
+         * which will make us skip the event that actually needs to be stopped.
+         * So ring_buffer_attach() has to stop an aux event before re-assigning
+         * its rb pointer.
          */
         if (rcu_dereference(parent->rb) == rb)
                 ro->err = __perf_event_stop(&sd);
@@ -6708,7 +6727,7 @@ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
         raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
         if (restart)
-                perf_event_restart(event);
+                perf_event_stop(event, 1);
 }
 
 /*
@@ -7897,7 +7916,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
         mmput(mm);
 
 restart:
-        perf_event_restart(event);
+        perf_event_stop(event, 1);
 }
 
 /*
@@ -330,15 +330,22 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
         if (!rb)
                 return NULL;
 
-        if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
+        if (!rb_has_aux(rb))
                 goto err;
 
         /*
-         * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
-         * the aux buffer is in perf_mmap_close(), about to get freed.
+         * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
+         * about to get freed, so we leave immediately.
+         *
+         * Checking rb::aux_mmap_count and rb::refcount has to be done in
+         * the same order, see perf_mmap_close. Otherwise we end up freeing
+         * aux pages in this path, which is a bug, because in_atomic().
          */
         if (!atomic_read(&rb->aux_mmap_count))
-                goto err_put;
+                goto err;
+
+        if (!atomic_inc_not_zero(&rb->aux_refcount))
+                goto err;
 
         /*
          * Nesting is not supported for AUX area, make sure nested
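
The last hunk reorders the checks in perf_aux_output_begin(): the aux_mmap_count test now happens before the aux_refcount is taken, matching the order used by the unmap path so the (possibly NMI-context) writer never ends up being the one to free the AUX pages. A userspace model of that ordering is sketched below, with a hand-rolled inc_not_zero() helper standing in for atomic_inc_not_zero(); it is an illustration, not the ring-buffer code.

/*
 * Userspace model: look at the mmap count first, and only then try to take
 * the reference, mirroring the order the unmap path uses.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int aux_mmap_count = 1;
static _Atomic int aux_refcount = 1;

/* take a reference only if the count is not already zero */
static bool inc_not_zero(_Atomic int *ref)
{
        int old = atomic_load(ref);

        while (old != 0)
                if (atomic_compare_exchange_weak(ref, &old, old + 1))
                        return true;
        return false;
}

static bool aux_output_begin(void)
{
        /* buffer is being unmapped: bail before taking any reference */
        if (atomic_load(&aux_mmap_count) == 0)
                return false;

        /* only now pin the AUX area */
        if (!inc_not_zero(&aux_refcount))
                return false;

        return true;
}

int main(void)
{
        printf("begin: %d\n", aux_output_begin());      /* 1 */
        atomic_store(&aux_mmap_count, 0);
        printf("begin: %d\n", aux_output_begin());      /* 0 */
        return 0;
}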