Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "This tree includes four core perf fixes for misc bugs, three fixes to
  x86 PMU drivers, and two updates to old email addresses."

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Do not send exit event twice
  perf/x86/intel: Fix INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA macro
  perf/x86/intel: Make L1D_PEND_MISS.FB_FULL not constrained on Haswell
  perf: Fix PERF_EVENT_IOC_PERIOD deadlock
  treewide: Remove old email address
  perf/x86: Fix LBR call stack save/restore
  perf: Update email address in MAINTAINERS
  perf/core: Robustify the perf_cgroup_from_task() RCU checks
  perf/core: Fix RCU problem with cgroup context switching code
commit 51825c8a86
@@ -8286,7 +8286,7 @@ F: include/linux/delayacct.h
 F: kernel/delayacct.c
 
 PERFORMANCE EVENTS SUBSYSTEM
-M: Peter Zijlstra <a.p.zijlstra@chello.nl>
+M: Peter Zijlstra <peterz@infradead.org>
 M: Ingo Molnar <mingo@redhat.com>
 M: Arnaldo Carvalho de Melo <acme@kernel.org>
 L: linux-kernel@vger.kernel.org

@@ -14,7 +14,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:

@@ -10,7 +10,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:

@@ -9,7 +9,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/perf_event.h>

@@ -21,7 +21,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  */

@@ -5,7 +5,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  *

@@ -5,7 +5,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  *

@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
         __EVENT_CONSTRAINT(code, n, \
-                           INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                            HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
 
 
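The bug fixed here is a copy-paste error: OR-ing INTEL_ARCH_EVENT_MASK with itself is a no-op, so the X86_ALL_EVENT_FLAGS bits never made it into the constraint mask and the flags were not checked when matching events. A minimal standalone sketch of the mask arithmetic, using illustrative placeholder values rather than the kernel's real mask definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel masks; values illustrative only. */
    #define INTEL_ARCH_EVENT_MASK 0x0000ffffu  /* event code + umask bits */
    #define X86_ALL_EVENT_FLAGS   0x00ff0000u  /* internal flag bits */

    int main(void)
    {
        /* Buggy form: OR-ing a mask with itself changes nothing, so the
         * flag bits were silently left out of the constraint mask. */
        uint32_t buggy = INTEL_ARCH_EVENT_MASK | INTEL_ARCH_EVENT_MASK;
        /* Fixed form: the flag bits are actually included. */
        uint32_t fixed = INTEL_ARCH_EVENT_MASK | X86_ALL_EVENT_FLAGS;

        assert(buggy == INTEL_ARCH_EVENT_MASK);
        assert(fixed != buggy);
        printf("buggy=%#x fixed=%#x\n", buggy, fixed);
        return 0;
    }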
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
         u64 lbr_from[MAX_LBR_ENTRIES];
         u64 lbr_to[MAX_LBR_ENTRIES];
         u64 lbr_info[MAX_LBR_ENTRIES];
+        int tos;
         int lbr_callstack_users;
         int lbr_stack_state;
 };

@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+        INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
         INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
         INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
         /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
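Narrowing the constraint from all of event 0x48 to only the PENDING umask means the other L1D_PEND_MISS umasks, such as FB_FULL, can now schedule on any counter. A sketch of counting such an event from user space with a raw config; the FB_FULL encoding (event select 0x48, umask 0x02) is an assumption taken from Intel's Haswell event lists and should be verified against your CPU's documentation:

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Assumed encoding for L1D_PEND_MISS.FB_FULL on Haswell:
     * umask in bits 15:8, event select in bits 7:0. */
    #define FB_FULL_CONFIG ((0x02u << 8) | 0x48u)

    int main(void)
    {
        struct perf_event_attr attr;
        long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.config = FB_FULL_CONFIG;
        attr.disabled = 1;

        /* Monitor this thread on any CPU. */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement would run here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("FB_FULL count: %lld\n", count);
        close(fd);
        return 0;
    }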
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
         if (event->attach_state & PERF_ATTACH_TASK)
-                return perf_cgroup_from_task(event->hw.target);
+                return perf_cgroup_from_task(event->hw.target, event->ctx);
 
         return event->cgrp;
 }

@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
         }
 
         mask = x86_pmu.lbr_nr - 1;
-        tos = intel_pmu_lbr_tos();
+        tos = task_ctx->tos;
         for (i = 0; i < tos; i++) {
                 lbr_idx = (tos - i) & mask;
                 wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
                 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                         wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
         }
+        wrmsrl(x86_pmu.lbr_tos, tos);
         task_ctx->lbr_stack_state = LBR_NONE;
 }
 
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
                 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                         rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
         }
+        task_ctx->tos = tos;
         task_ctx->lbr_stack_state = LBR_VALID;
 }
 
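The LBR fix: restore previously read the live top-of-stack from the hardware (intel_pmu_lbr_tos()), but by switch-in time the hardware TOS no longer matches the position the entries were saved from, so entries landed at the wrong slots. Saving tos in the task context and writing it back on restore keeps the ring aligned. A standalone user-space model of that invariant (all names and sizes hypothetical):

    #include <assert.h>
    #include <string.h>

    #define NR 8    /* ring size, power of two */

    struct lbr_ring { unsigned long from[NR]; int tos; };

    /* Save entries newest-first relative to the ring's own top-of-stack. */
    static void save(struct lbr_ring *hw, struct lbr_ring *ctx)
    {
        int i, mask = NR - 1;
        for (i = 0; i < NR; i++)
            ctx->from[i] = hw->from[(hw->tos - i) & mask];
        ctx->tos = hw->tos;    /* the fix: remember where we saved from */
    }

    /* Restore relative to the SAVED tos, not whatever tos hw has now. */
    static void restore(struct lbr_ring *ctx, struct lbr_ring *hw)
    {
        int i, mask = NR - 1;
        int tos = ctx->tos;    /* the buggy version read hw->tos here */
        for (i = 0; i < NR; i++)
            hw->from[(tos - i) & mask] = ctx->from[i];
        hw->tos = tos;
    }

    int main(void)
    {
        struct lbr_ring hw = { {1, 2, 3, 4, 5, 6, 7, 8}, 5 }, ctx, hw2;
        save(&hw, &ctx);
        memset(&hw2, 0, sizeof(hw2));
        hw2.tos = 2;    /* hardware moved on while the task was out */
        restore(&ctx, &hw2);
        assert(memcmp(hw.from, hw2.from, sizeof(hw.from)) == 0);
        return 0;
    }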
@@ -1,7 +1,7 @@
 /*
  * x86 specific code for irq_work
  *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/kernel.h>

@@ -5,7 +5,7 @@
  * Copyright 2001 Red Hat, Inc.
  * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
  *
- * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License

@@ -5,7 +5,7 @@
  * Jump label support
  *
  * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  *
  * DEPRECATED API:
  *

@@ -2,7 +2,7 @@
  * Runtime locking correctness validator
  *
  * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * see Documentation/locking/lockdep-design.txt for more details.
  */

@@ -697,9 +697,11 @@ struct perf_cgroup {
  * if there is no cgroup event for the current CPU context.
  */
 static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 {
-        return container_of(task_css(task, perf_event_cgrp_id),
+        return container_of(task_css_check(task, perf_event_cgrp_id,
+                                           ctx ? lockdep_is_held(&ctx->lock)
+                                               : true),
                             struct perf_cgroup, css);
 }
 #endif /* CONFIG_CGROUP_PERF */
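Callers must now prove to lockdep that the cgroup pointer is stable: either pass the context whose ->lock they hold, or pass NULL and be inside an RCU read-side section themselves (as the context-switch paths below are). A toy model of that contract, with lockdep and task_css_check() reduced to plain assertions for illustration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Toy model of the two ways a caller may pin the cgroup pointer. */
    struct perf_event_context { bool lock_held; };
    static bool rcu_reader_active;  /* stands in for rcu_read_lock_held() */

    struct perf_cgroup { int id; };
    static struct perf_cgroup the_cgroup = { 42 };

    static struct perf_cgroup *
    perf_cgroup_from_task_model(struct perf_event_context *ctx)
    {
        /* Mirrors the task_css_check() condition: either the ctx lock
         * pins the pointer, or the caller holds the RCU read lock. */
        assert((ctx && ctx->lock_held) || rcu_reader_active);
        return &the_cgroup;
    }

    int main(void)
    {
        struct perf_event_context ctx = { .lock_held = true };

        /* A call site holding ctx->lock passes the ctx... */
        assert(perf_cgroup_from_task_model(&ctx)->id == 42);

        /* ...the sched-in/out paths pass NULL under rcu_read_lock(). */
        rcu_reader_active = true;
        assert(perf_cgroup_from_task_model(NULL)->id == 42);
        rcu_reader_active = false;
        return 0;
    }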
@@ -1,7 +1,7 @@
 /*
  * FLoating proportions
  *
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * This file contains the public data structure and API definitions.
  */

@@ -21,7 +21,7 @@
  * Authors:
  *      Srikar Dronamraju
  *      Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/errno.h>

@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING

@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
         if (!is_cgroup_event(event))
                 return;
 
-        cgrp = perf_cgroup_from_task(current);
+        cgrp = perf_cgroup_from_task(current, event->ctx);
         /*
          * Do not update time when cgroup is not active
          */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
         if (!task || !ctx->nr_cgroups)
                 return;
 
-        cgrp = perf_cgroup_from_task(task);
+        cgrp = perf_cgroup_from_task(task, ctx);
         info = this_cpu_ptr(cgrp->info);
         info->timestamp = ctx->timestamp;
 }
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
          * we reschedule only in the presence of cgroup
          * constrained events.
          */
-        rcu_read_lock();
 
         list_for_each_entry_rcu(pmu, &pmus, entry) {
                 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -522,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
                          * set cgrp before ctxsw in to allow
                          * event_filter_match() to not have to pass
                          * task around
+                         * we pass the cpuctx->ctx to perf_cgroup_from_task()
+                         * because cgroup events are only per-cpu
                          */
-                        cpuctx->cgrp = perf_cgroup_from_task(task);
+                        cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
                         cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
                 }
                 perf_pmu_enable(cpuctx->ctx.pmu);
@@ -531,8 +532,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
                 }
         }
 
-        rcu_read_unlock();
-
         local_irq_restore(flags);
 }
 
@@ -542,17 +541,20 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
         struct perf_cgroup *cgrp1;
         struct perf_cgroup *cgrp2 = NULL;
 
+        rcu_read_lock();
         /*
          * we come here when we know perf_cgroup_events > 0
+         * we do not need to pass the ctx here because we know
+         * we are holding the rcu lock
          */
-        cgrp1 = perf_cgroup_from_task(task);
+        cgrp1 = perf_cgroup_from_task(task, NULL);
 
         /*
          * next is NULL when called from perf_event_enable_on_exec()
          * that will systematically cause a cgroup_switch()
          */
         if (next)
-                cgrp2 = perf_cgroup_from_task(next);
+                cgrp2 = perf_cgroup_from_task(next, NULL);
 
         /*
          * only schedule out current cgroup events if we know
@@ -561,6 +563,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
          */
         if (cgrp1 != cgrp2)
                 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+        rcu_read_unlock();
 }
 
 static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,13 +573,16 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
         struct perf_cgroup *cgrp1;
         struct perf_cgroup *cgrp2 = NULL;
 
+        rcu_read_lock();
         /*
          * we come here when we know perf_cgroup_events > 0
+         * we do not need to pass the ctx here because we know
+         * we are holding the rcu lock
          */
-        cgrp1 = perf_cgroup_from_task(task);
+        cgrp1 = perf_cgroup_from_task(task, NULL);
 
         /* prev can never be NULL */
-        cgrp2 = perf_cgroup_from_task(prev);
+        cgrp2 = perf_cgroup_from_task(prev, NULL);
 
         /*
          * only need to schedule in cgroup events if we are changing
@@ -584,6 +591,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
          */
         if (cgrp1 != cgrp2)
                 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+        rcu_read_unlock();
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -4216,7 +4225,14 @@ retry:
                 goto retry;
         }
 
-        __perf_event_period(&pe);
+        if (event->attr.freq) {
+                event->attr.sample_freq = value;
+        } else {
+                event->attr.sample_period = value;
+                event->hw.sample_period = value;
+        }
+
+        local64_set(&event->hw.period_left, 0);
         raw_spin_unlock_irq(&ctx->lock);
 
         return 0;
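The deadlock fix updates the period fields directly while holding ctx->lock instead of routing through a cross-CPU helper under the lock. Nothing changes for user space: the period is still updated through the PERF_EVENT_IOC_PERIOD ioctl, which before this fix could deadlock in the kernel. A minimal sketch using a software event:

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        unsigned long long period = 100000;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.sample_period = 1000000;
        attr.disabled = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        /* Re-arm the event with a new sample period; this is the
         * ioctl path the fix makes safe against the deadlock. */
        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
            perror("PERF_EVENT_IOC_PERIOD");

        close(fd);
        return 0;
    }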
@@ -5666,6 +5682,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
         }
 }
 
+static void
+perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
+                        struct perf_event_context *task_ctx)
+{
+        rcu_read_lock();
+        preempt_disable();
+        perf_event_aux_ctx(task_ctx, output, data);
+        preempt_enable();
+        rcu_read_unlock();
+}
+
 static void
 perf_event_aux(perf_event_aux_output_cb output, void *data,
                struct perf_event_context *task_ctx)
@@ -5675,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
         struct pmu *pmu;
         int ctxn;
 
+        /*
+         * If we have task_ctx != NULL we only notify
+         * the task context itself. The task_ctx is set
+         * only for EXIT events before releasing task
+         * context.
+         */
+        if (task_ctx) {
+                perf_event_aux_task_ctx(output, data, task_ctx);
+                return;
+        }
+
         rcu_read_lock();
         list_for_each_entry_rcu(pmu, &pmus, entry) {
                 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
                 if (cpuctx->unique_pmu != pmu)
                         goto next;
                 perf_event_aux_ctx(&cpuctx->ctx, output, data);
-                if (task_ctx)
-                        goto next;
                 ctxn = pmu->task_ctx_nr;
                 if (ctxn < 0)
                         goto next;
@@ -5692,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
 next:
                 put_cpu_ptr(pmu->pmu_cpu_context);
         }
-
-        if (task_ctx) {
-                preempt_disable();
-                perf_event_aux_ctx(task_ctx, output, data);
-                preempt_enable();
-        }
         rcu_read_unlock();
 }
 
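In the old layout, a call made for a single task context still walked the per-cpu contexts before reaching the tail delivery, and perf_event_exit_task_context() made one such call per task context number, so system-wide listeners saw the EXIT event twice. The early return gives task_ctx exactly one delivery path, and a single trailing call (added below in perf_event_exit_task()) covers the cpu contexts once. A toy model of the before/after delivery counts (names hypothetical):

    #include <assert.h>
    #include <stddef.h>

    static int cpu_deliveries;  /* EXIT events seen by system-wide listeners */

    /* Old shape: every call walks the per-cpu contexts, even when it was
     * only meant to notify one task context. */
    static void aux_old(void *task_ctx)
    {
        cpu_deliveries++;       /* per-cpu contexts always notified */
        (void)task_ctx;         /* ...then the task context at the tail */
    }

    /* New shape: a dedicated task context short-circuits the cpu walk. */
    static void aux_new(void *task_ctx)
    {
        if (task_ctx)
            return;             /* only the task context is notified */
        cpu_deliveries++;
    }

    int main(void)
    {
        int hw_ctx, sw_ctx;

        /* Old flow: one call per task context number, each also hitting
         * the cpu contexts, hence a duplicate EXIT. */
        cpu_deliveries = 0;
        aux_old(&hw_ctx);
        aux_old(&sw_ctx);
        assert(cpu_deliveries == 2);

        /* New flow: task contexts notified alone, plus one trailing
         * NULL call for the cpu contexts. EXIT arrives exactly once. */
        cpu_deliveries = 0;
        aux_new(&hw_ctx);
        aux_new(&sw_ctx);
        aux_new(NULL);
        assert(cpu_deliveries == 1);
        return 0;
    }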
@@ -8787,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         struct perf_event_context *child_ctx, *clone_ctx = NULL;
         unsigned long flags;
 
-        if (likely(!child->perf_event_ctxp[ctxn])) {
-                perf_event_task(child, NULL, 0);
+        if (likely(!child->perf_event_ctxp[ctxn]))
                 return;
-        }
 
         local_irq_save(flags);
         /*
@@ -8874,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)
 
         for_each_task_context_nr(ctxn)
                 perf_event_exit_task_context(child, ctxn);
+
+        /*
+         * The perf_event_exit_task_context calls perf_event_task
+         * with child's task_ctx, which generates EXIT events for
+         * child contexts and sets child->perf_event_ctxp[] to NULL.
+         * At this point we need to send EXIT events to cpu contexts.
+         */
+        perf_event_task(child, NULL, 0);
 }
 
 static void perf_free_event(struct perf_event *event,
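Consumers opt into EXIT records by setting attr.task; after this series a system-wide session sees one PERF_RECORD_EXIT per exiting task instead of duplicates. A hedged setup sketch (record parsing from the mmap ring is omitted; the 8-page ring size is an arbitrary choice):

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        long ps = sysconf(_SC_PAGESIZE);
        void *ring;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_DUMMY;  /* sideband records only */
        attr.task = 1;                      /* request FORK/EXIT records */
        attr.sample_period = 1;

        /* System-wide on CPU 0 (requires privilege); each exiting task
         * should now produce exactly one PERF_RECORD_EXIT. */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        /* Map the ring: 1 metadata page + a power-of-two data area. */
        ring = mmap(NULL, (1 + 8) * ps, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, 0);
        if (ring == MAP_FAILED)
            perror("mmap");
        /* ... read PERF_RECORD_EXIT entries from the ring here ... */

        close(fd);
        return 0;
    }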
@@ -9452,7 +9488,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 static int __perf_cgroup_move(void *info)
 {
         struct task_struct *task = info;
+        rcu_read_lock();
         perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+        rcu_read_unlock();
         return 0;
 }
 

@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING

@@ -19,7 +19,7 @@
  * Authors:
  *      Srikar Dronamraju
  *      Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/kernel.h>

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  *
  * Provides a framework for enqueueing and running callbacks from hardirq
  * context. The enqueueing is NMI-safe.

@@ -2,7 +2,7 @@
  * jump label support
  *
  * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011 Peter Zijlstra
  *
  */
 #include <linux/memory.h>

@@ -6,7 +6,7 @@
  * Started by Ingo Molnar:
  *
  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * this code maps all the lock dependencies as they occur in a live kernel
  * and will warn about the following classes of locking bugs:

@@ -6,7 +6,7 @@
  * Started by Ingo Molnar:
  *
  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Code for /proc/lockdep and /proc/lockdep_stats:
  *

@@ -1,7 +1,7 @@
 /*
  * sched_clock for unstable cpu clocks
  *
- *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
  *
  *  Updates and enhancements:
  *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>

@@ -17,7 +17,7 @@
  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  *
  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/latencytop.h>

@@ -1,7 +1,7 @@
 /*
  * trace event based perf event profiling/tracing
  *
- * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
  * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  */
 

@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
- * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
  *
  * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch

@@ -1,7 +1,7 @@
 /*
  * Floating proportions
  *
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Description:
  *

@@ -2,7 +2,7 @@
  * mm/page-writeback.c
  *
  * Copyright (C) 2002, Linus Torvalds.
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Contains functions related to writing back dirty pages at the
  * address_space level.