Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

  The final removal of the STARTING/DYING infrastructure will happen
  when the merge window closes.

  Another 700 hundred line of unpenetrable maze gone :)"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
commit a6408f6cb6
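All of the conversions in this pull follow one shape, so it is worth sketching it once. The snippet below is illustrative only — the names foo_enable(), foo_starting_cpu() and the state constant CPUHP_AP_FOO_STARTING are invented for the example, not taken from any of the patches:

    /* Old style: one notifier demultiplexes every hotplug event. */
    static int foo_cpu_notify(struct notifier_block *self,
                  unsigned long action, void *hcpu)
    {
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
            foo_enable();
            break;
        case CPU_DYING:
            foo_disable();
            break;
        }
        return NOTIFY_OK;
    }

    /*
     * New style: one plain callback per direction, registered once.
     * Callbacks return 0 or -errno instead of NOTIFY_OK/NOTIFY_BAD.
     */
    static int foo_starting_cpu(unsigned int cpu)
    {
        foo_enable();
        return 0;
    }

    static int foo_dying_cpu(unsigned int cpu)
    {
        foo_disable();
        return 0;
    }

    ret = cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
                foo_starting_cpu, foo_dying_cpu);

cpuhp_setup_state() also invokes the startup callback on every CPU that is already online; the _nocalls() variant used by several patches below registers the callbacks without invoking them.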
@@ -296,30 +296,23 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
     return IRQ_HANDLED;
 }
 
-static int arc_timer_cpu_notify(struct notifier_block *self,
-                unsigned long action, void *hcpu)
+static int arc_timer_starting_cpu(unsigned int cpu)
 {
     struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 
     evt->cpumask = cpumask_of(smp_processor_id());
 
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_STARTING:
-        clockevents_config_and_register(evt, arc_timer_freq,
-                        0, ULONG_MAX);
-        enable_percpu_irq(arc_timer_irq, 0);
-        break;
-    case CPU_DYING:
-        disable_percpu_irq(arc_timer_irq);
-        break;
-    }
-
-    return NOTIFY_OK;
+    clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
+    enable_percpu_irq(arc_timer_irq, 0);
+    return 0;
 }
 
-static struct notifier_block arc_timer_cpu_nb = {
-    .notifier_call = arc_timer_cpu_notify,
-};
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+    disable_percpu_irq(arc_timer_irq);
+    return 0;
+}
 
 /*
  * clockevent setup for boot CPU
@@ -329,12 +322,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
     struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
     int ret;
 
-    ret = register_cpu_notifier(&arc_timer_cpu_nb);
-    if (ret) {
-        pr_err("Failed to register cpu notifier");
-        return ret;
-    }
-
     arc_timer_irq = irq_of_parse_and_map(node, 0);
     if (arc_timer_irq <= 0) {
         pr_err("clockevent: missing irq");
@@ -347,11 +334,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
         return ret;
     }
 
-    evt->irq = arc_timer_irq;
-    evt->cpumask = cpumask_of(smp_processor_id());
-    clockevents_config_and_register(evt, arc_timer_freq,
-                    0, ARC_TIMER_MAX);
-
     /* Needs apriori irq_set_percpu_devid() done in intc map function */
     ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
                  "Timer0 (per-cpu-tick)", evt);
@@ -360,8 +342,14 @@ static int __init arc_clockevent_setup(struct device_node *node)
         return ret;
     }
 
-    enable_percpu_irq(arc_timer_irq, 0);
+    ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+                "AP_ARC_TIMER_STARTING",
+                arc_timer_starting_cpu,
+                arc_timer_dying_cpu);
+    if (ret) {
+        pr_err("Failed to setup hotplug state");
+        return ret;
+    }
     return 0;
 }
 
@@ -310,24 +310,17 @@ static void twd_timer_setup(void)
     enable_percpu_irq(clk->irq, 0);
 }
 
-static int twd_timer_cpu_notify(struct notifier_block *self,
-                unsigned long action, void *hcpu)
+static int twd_timer_starting_cpu(unsigned int cpu)
 {
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_STARTING:
-        twd_timer_setup();
-        break;
-    case CPU_DYING:
-        twd_timer_stop();
-        break;
-    }
-
-    return NOTIFY_OK;
+    twd_timer_setup();
+    return 0;
 }
 
-static struct notifier_block twd_timer_cpu_nb = {
-    .notifier_call = twd_timer_cpu_notify,
-};
+static int twd_timer_dying_cpu(unsigned int cpu)
+{
+    twd_timer_stop();
+    return 0;
+}
 
 static int __init twd_local_timer_common_register(struct device_node *np)
 {
@@ -345,9 +338,9 @@ static int __init twd_local_timer_common_register(struct device_node *np)
         goto out_free;
     }
 
-    err = register_cpu_notifier(&twd_timer_cpu_nb);
-    if (err)
-        goto out_irq;
+    cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
+                  "AP_ARM_TWD_STARTING",
+                  twd_timer_starting_cpu, twd_timer_dying_cpu);
 
     twd_get_clock(np);
     if (!of_property_read_bool(np, "always-on"))
@@ -365,8 +358,6 @@ static int __init twd_local_timer_common_register(struct device_node *np)
 
     return 0;
 
-out_irq:
-    free_percpu_irq(twd_ppi, twd_evt);
 out_free:
     iounmap(twd_base);
     twd_base = NULL;
@@ -111,20 +111,12 @@ static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
     .notifier_call = mvebu_hwcc_notifier,
 };
 
-static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb,
-                           unsigned long action, void *hcpu)
+static int armada_xp_clear_l2_starting(unsigned int cpu)
 {
-    if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-        armada_xp_clear_shared_l2();
-
-    return NOTIFY_OK;
+    armada_xp_clear_shared_l2();
+    return 0;
 }
 
-static struct notifier_block armada_xp_clear_shared_l2_notifier = {
-    .notifier_call = armada_xp_clear_shared_l2_notifier_func,
-    .priority = 100,
-};
-
 static void __init armada_370_coherency_init(struct device_node *np)
 {
     struct resource res;
@@ -155,8 +147,9 @@ static void __init armada_370_coherency_init(struct device_node *np)
 
     of_node_put(cpu_config_np);
 
-    register_cpu_notifier(&armada_xp_clear_shared_l2_notifier);
+    cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
+                  "AP_ARM_MVEBU_COHERENCY",
+                  armada_xp_clear_l2_starting, NULL);
 exit:
     set_cpu_coherent();
 }
@@ -597,17 +597,16 @@ static void l2c310_configure(void __iomem *base)
                   L310_POWER_CTRL);
 }
 
-static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
+static int l2c310_starting_cpu(unsigned int cpu)
 {
-    switch (act & ~CPU_TASKS_FROZEN) {
-    case CPU_STARTING:
-        set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
-        break;
-    case CPU_DYING:
-        set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
-        break;
-    }
-    return NOTIFY_OK;
+    set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+    return 0;
+}
+
+static int l2c310_dying_cpu(unsigned int cpu)
+{
+    set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+    return 0;
 }
 
 static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
@@ -678,10 +677,10 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
                power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
     }
 
-    if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
-        set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
-        cpu_notifier(l2c310_cpu_enable_flz, 0);
-    }
+    if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
+        cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
+                  "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
+                  l2c310_dying_cpu);
 }
 
 static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
@@ -643,19 +643,19 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
  * hardware state at every thread switch. We clear our held state when
  * a CPU has been killed, indicating that the VFP hardware doesn't contain
  * a threads VFP state. When a CPU starts up, we re-enable access to the
- * VFP hardware.
- *
- * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * VFP hardware. The callbacks below are called on the CPU which
  * is being offlined/onlined.
  */
-static int vfp_hotplug(struct notifier_block *b, unsigned long action,
-    void *hcpu)
+static int vfp_dying_cpu(unsigned int cpu)
 {
-    if (action == CPU_DYING || action == CPU_DYING_FROZEN)
-        vfp_current_hw_state[(long)hcpu] = NULL;
-    else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-        vfp_enable(NULL);
-    return NOTIFY_OK;
+    vfp_force_reload(cpu, current_thread_info());
+    return 0;
+}
+
+static int vfp_starting_cpu(unsigned int unused)
+{
+    vfp_enable(NULL);
+    return 0;
 }
 
 void vfp_kmode_exception(void)
@@ -732,6 +732,10 @@ static int __init vfp_init(void)
     unsigned int vfpsid;
     unsigned int cpu_arch = cpu_architecture();
 
+    /*
+     * Enable the access to the VFP on all online CPUs so the
+     * following test on FPSID will succeed.
+     */
     if (cpu_arch >= CPU_ARCH_ARMv6)
         on_each_cpu(vfp_enable, NULL, 1);
 
@@ -794,7 +798,9 @@ static int __init vfp_init(void)
         VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
     }
 
-    hotcpu_notifier(vfp_hotplug, 0);
+    cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
+                  "AP_ARM_VFP_STARTING", vfp_starting_cpu,
+                  vfp_dying_cpu);
 
     vfp_vector = vfp_support_entry;
 
@@ -153,12 +153,11 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
     .notifier_call = xen_pvclock_gtod_notify,
 };
 
-static void xen_percpu_init(void)
+static int xen_starting_cpu(unsigned int cpu)
 {
     struct vcpu_register_vcpu_info info;
     struct vcpu_info *vcpup;
     int err;
-    int cpu = get_cpu();
 
     /*
      * VCPUOP_register_vcpu_info cannot be called twice for the same
@@ -186,7 +185,13 @@ static void xen_percpu_init(void)
 
 after_register_vcpu_info:
     enable_percpu_irq(xen_events_irq, 0);
-    put_cpu();
+    return 0;
+}
+
+static int xen_dying_cpu(unsigned int cpu)
+{
+    disable_percpu_irq(xen_events_irq);
+    return 0;
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
@@ -205,28 +210,6 @@ static void xen_power_off(void)
     BUG_ON(rc);
 }
 
-static int xen_cpu_notification(struct notifier_block *self,
-                unsigned long action,
-                void *hcpu)
-{
-    switch (action) {
-    case CPU_STARTING:
-        xen_percpu_init();
-        break;
-    case CPU_DYING:
-        disable_percpu_irq(xen_events_irq);
-        break;
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
-}
-
-static struct notifier_block xen_cpu_notifier = {
-    .notifier_call = xen_cpu_notification,
-};
-
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
     xen_hvm_evtchn_do_upcall();
@@ -425,16 +408,14 @@ static int __init xen_guest_init(void)
         return -EINVAL;
     }
 
-    xen_percpu_init();
-
-    register_cpu_notifier(&xen_cpu_notifier);
-
     xen_time_setup_guest();
 
     if (xen_initial_domain())
         pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 
-    return 0;
+    return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
+                 "AP_ARM_XEN_STARTING", xen_starting_cpu,
+                 xen_dying_cpu);
 }
 early_initcall(xen_guest_init);
 
@@ -121,7 +121,7 @@ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
  * 0       - If all the hooks ran successfully.
  * -EINVAL - At least one hook is not supported by the CPU.
  */
-static int run_all_insn_set_hw_mode(unsigned long cpu)
+static int run_all_insn_set_hw_mode(unsigned int cpu)
 {
     int rc = 0;
     unsigned long flags;
@@ -131,7 +131,7 @@ static int run_all_insn_set_hw_mode(unsigned long cpu)
     list_for_each_entry(insn, &insn_emulation, node) {
         bool enable = (insn->current_mode == INSN_HW);
         if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
-            pr_warn("CPU[%ld] cannot support the emulation of %s",
+            pr_warn("CPU[%u] cannot support the emulation of %s",
                 cpu, insn->ops->name);
             rc = -EINVAL;
         }
@@ -611,20 +611,6 @@ static struct insn_emulation_ops setend_ops = {
     .set_hw_mode = setend_set_hw_mode,
 };
 
-static int insn_cpu_hotplug_notify(struct notifier_block *b,
-                   unsigned long action, void *hcpu)
-{
-    int rc = 0;
-    if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
-        rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
-
-    return notifier_from_errno(rc);
-}
-
-static struct notifier_block insn_cpu_hotplug_notifier = {
-    .notifier_call = insn_cpu_hotplug_notify,
-};
-
 /*
  * Invoked as late_initcall, since not needed before init spawned.
  */
@@ -643,7 +629,9 @@ static int __init armv8_deprecated_init(void)
             pr_info("setend instruction emulation is not supported on the system");
     }
 
-    register_cpu_notifier(&insn_cpu_hotplug_notifier);
+    cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
+                  "AP_ARM64_ISNDEP_STARTING",
+                  run_all_insn_set_hw_mode, NULL);
     register_insn_emulation_sysctl(ctl_abi);
 
     return 0;
@@ -453,29 +453,13 @@ static struct pmu pmu = {
     .read        = bfin_pmu_read,
 };
 
-static void bfin_pmu_setup(int cpu)
+static int bfin_pmu_prepare_cpu(unsigned int cpu)
 {
     struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-    memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int
-bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-    unsigned int cpu = (long)hcpu;
-
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_UP_PREPARE:
-        bfin_write_PFCTL(0);
-        bfin_pmu_setup(cpu);
-        break;
-
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
+    bfin_write_PFCTL(0);
+    memset(cpuhw, 0, sizeof(struct cpu_hw_events));
+    return 0;
 }
 
 static int __init bfin_pmu_init(void)
@@ -491,8 +475,8 @@ static int __init bfin_pmu_init(void)
 
     ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
     if (!ret)
-        perf_cpu_notifier(bfin_pmu_notifier);
-
+        cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
+                  bfin_pmu_prepare_cpu, NULL);
     return ret;
 }
 early_initcall(bfin_pmu_init);
@@ -806,25 +806,16 @@ static struct metag_pmu _metag_pmu = {
 };
 
 /* PMU CPU hotplug notifier */
-static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
-        void *hcpu)
+static int metag_pmu_starting_cpu(unsigned int cpu)
 {
-    unsigned int cpu = (unsigned int)hcpu;
     struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
-    if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-        return NOTIFY_DONE;
-
     memset(cpuc, 0, sizeof(struct cpu_hw_events));
     raw_spin_lock_init(&cpuc->pmu_lock);
 
-    return NOTIFY_OK;
+    return 0;
 }
 
-static struct notifier_block metag_pmu_notifier = {
-    .notifier_call = metag_pmu_cpu_notify,
-};
-
 /* PMU Initialisation */
 static int __init init_hw_perf_events(void)
 {
@@ -876,16 +867,13 @@ static int __init init_hw_perf_events(void)
     metag_out32(0, PERF_COUNT(0));
     metag_out32(0, PERF_COUNT(1));
 
-    for_each_possible_cpu(cpu) {
-        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+    cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
+              "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
+              NULL);
 
-        memset(cpuc, 0, sizeof(struct cpu_hw_events));
-        raw_spin_lock_init(&cpuc->pmu_lock);
-    }
-
-    register_cpu_notifier(&metag_pmu_notifier);
     ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
-out:
+    if (ret)
+        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING);
     return ret;
 }
 early_initcall(init_hw_perf_events);
@@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void)
     return handled;
 }
 
-static int loongson3_cpu_callback(struct notifier_block *nfb,
-                  unsigned long action, void *hcpu)
+static int loongson3_starting_cpu(unsigned int cpu)
 {
-    switch (action) {
-    case CPU_STARTING:
-    case CPU_STARTING_FROZEN:
-        write_c0_perflo1(reg.control1);
-        write_c0_perflo2(reg.control2);
-        break;
-    case CPU_DYING:
-    case CPU_DYING_FROZEN:
-        write_c0_perflo1(0xc0000000);
-        write_c0_perflo2(0x40000000);
-        break;
-    }
-
-    return NOTIFY_OK;
+    write_c0_perflo1(reg.control1);
+    write_c0_perflo2(reg.control2);
+    return 0;
 }
 
-static struct notifier_block loongson3_notifier_block = {
-    .notifier_call = loongson3_cpu_callback
-};
+static int loongson3_dying_cpu(unsigned int cpu)
+{
+    write_c0_perflo1(0xc0000000);
+    write_c0_perflo2(0x40000000);
+    return 0;
+}
 
 static int __init loongson3_init(void)
 {
     on_each_cpu(reset_counters, NULL, 1);
-    register_hotcpu_notifier(&loongson3_notifier_block);
+    cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+                  "AP_MIPS_OP_LOONGSON3_STARTING",
+                  loongson3_starting_cpu, loongson3_dying_cpu);
     save_perf_irq = perf_irq;
     perf_irq = loongson3_perfcount_handler;
 
@@ -204,7 +197,7 @@ static int __init loongson3_init(void)
 static void loongson3_exit(void)
 {
     on_each_cpu(reset_counters, NULL, 1);
-    unregister_hotcpu_notifier(&loongson3_notifier_block);
+    cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
     perf_irq = save_perf_irq;
 }
 
@@ -581,30 +581,22 @@ static void verify_cpu_node_mapping(int cpu, int node)
     }
 }
 
-static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
-                 void *hcpu)
+/* Must run before sched domains notifier. */
+static int ppc_numa_cpu_prepare(unsigned int cpu)
 {
-    unsigned long lcpu = (unsigned long)hcpu;
-    int ret = NOTIFY_DONE, nid;
+    int nid;
 
-    switch (action) {
-    case CPU_UP_PREPARE:
-    case CPU_UP_PREPARE_FROZEN:
-        nid = numa_setup_cpu(lcpu);
-        verify_cpu_node_mapping((int)lcpu, nid);
-        ret = NOTIFY_OK;
-        break;
+    nid = numa_setup_cpu(cpu);
+    verify_cpu_node_mapping(cpu, nid);
+    return 0;
+}
+
+static int ppc_numa_cpu_dead(unsigned int cpu)
+{
 #ifdef CONFIG_HOTPLUG_CPU
-    case CPU_DEAD:
-    case CPU_DEAD_FROZEN:
-    case CPU_UP_CANCELED:
-    case CPU_UP_CANCELED_FROZEN:
-        unmap_cpu_from_node(lcpu);
-        ret = NOTIFY_OK;
-        break;
+    unmap_cpu_from_node(cpu);
 #endif
-    }
-    return ret;
+    return 0;
 }
 
 /*
@@ -913,11 +905,6 @@ static void __init dump_numa_memory_topology(void)
     }
 }
 
-static struct notifier_block ppc64_numa_nb = {
-    .notifier_call = cpu_numa_callback,
-    .priority = 1 /* Must run before sched domains notifier. */
-};
-
 /* Initialize NODE_DATA for a node on the local memory */
 static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 {
@@ -985,15 +972,18 @@ void __init initmem_init(void)
     setup_node_to_cpumask_map();
 
     reset_numa_cpu_lookup_table();
-    register_cpu_notifier(&ppc64_numa_nb);
+
     /*
      * We need the numa_cpu_lookup_table to be accurate for all CPUs,
      * even before we online them, so that we can use cpu_to_{node,mem}
      * early in boot, cf. smp_prepare_cpus().
+     * _nocalls() + manual invocation is used because cpuhp is not yet
+     * initialized for the boot CPU.
      */
-    for_each_present_cpu(cpu) {
-        numa_setup_cpu((unsigned long)cpu);
-    }
+    cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
+                  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
+    for_each_present_cpu(cpu)
+        numa_setup_cpu(cpu);
 }
 
 static int __init early_numa(char *p)
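The powerpc/numa conversion above is one of the few in this series that picks a PREPARE-section state (CPUHP_POWER_NUMA_PREPARE) instead of an AP _STARTING one. The distinction matters for what the callbacks may do: prepare/dead callbacks run on a control CPU before the incoming CPU starts (or after the dying CPU is gone) and may sleep, while starting/dying callbacks run on the hotplugged CPU itself with interrupts disabled. The _nocalls() registration combined with the manual for_each_present_cpu() walk is needed because, as the added comment says, the boot CPU is not yet managed by the state machine at this point in boot. In sketch form, using the names from the hunk above:

    cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
                  ppc_numa_cpu_prepare,    /* control CPU, may sleep */
                  ppc_numa_cpu_dead);      /* control CPU, after the CPU is down */
    for_each_present_cpu(cpu)
        numa_setup_cpu(cpu);    /* cover the boot CPU by hand */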
@@ -2158,31 +2158,15 @@ static void perf_event_interrupt(struct pt_regs *regs)
     irq_exit();
 }
 
-static void power_pmu_setup(int cpu)
+int power_pmu_prepare_cpu(unsigned int cpu)
 {
     struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-    if (!ppmu)
-        return;
-    memset(cpuhw, 0, sizeof(*cpuhw));
-    cpuhw->mmcr[0] = MMCR0_FC;
-}
-
-static int
-power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-    unsigned int cpu = (long)hcpu;
-
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_UP_PREPARE:
-        power_pmu_setup(cpu);
-        break;
-
-    default:
-        break;
+    if (ppmu) {
+        memset(cpuhw, 0, sizeof(*cpuhw));
+        cpuhw->mmcr[0] = MMCR0_FC;
     }
-
-    return NOTIFY_OK;
+    return 0;
 }
 
 int register_power_pmu(struct power_pmu *pmu)
@@ -2205,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
 #endif /* CONFIG_PPC64 */
 
     perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
-    perf_cpu_notifier(power_pmu_notifier);
-
+    cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
+              power_pmu_prepare_cpu, NULL);
     return 0;
 }
@@ -664,30 +664,22 @@ static struct pmu cpumf_pmu = {
     .cancel_txn   = cpumf_pmu_cancel_txn,
 };
 
-static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
-                  void *hcpu)
+static int cpumf_pmf_setup(unsigned int cpu, int flags)
 {
-    int flags;
-
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_ONLINE:
-    case CPU_DOWN_FAILED:
-        flags = PMC_INIT;
-        local_irq_disable();
-        setup_pmc_cpu(&flags);
-        local_irq_enable();
-        break;
-    case CPU_DOWN_PREPARE:
-        flags = PMC_RELEASE;
-        local_irq_disable();
-        setup_pmc_cpu(&flags);
-        local_irq_enable();
-        break;
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
+    local_irq_disable();
+    setup_pmc_cpu(&flags);
+    local_irq_enable();
+    return 0;
+}
+
+static int s390_pmu_online_cpu(unsigned int cpu)
+{
+    return cpumf_pmf_setup(cpu, PMC_INIT);
+}
+
+static int s390_pmu_offline_cpu(unsigned int cpu)
+{
+    return cpumf_pmf_setup(cpu, PMC_RELEASE);
 }
 
 static int __init cpumf_pmu_init(void)
@@ -707,7 +699,7 @@ static int __init cpumf_pmu_init(void)
     if (rc) {
         pr_err("Registering for CPU-measurement alerts "
                "failed with rc=%i\n", rc);
-        goto out;
+        return rc;
     }
 
     cpumf_pmu.attr_groups = cpumf_cf_event_group();
@@ -716,10 +708,10 @@ static int __init cpumf_pmu_init(void)
         pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
         unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
                     cpumf_measurement_alert);
-        goto out;
+        return rc;
     }
-    perf_cpu_notifier(cpumf_pmu_notifier);
-out:
-    return rc;
+
+    return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
+                 "AP_PERF_S390_CF_ONLINE",
+                 s390_pmu_online_cpu, s390_pmu_offline_cpu);
 }
 early_initcall(cpumf_pmu_init);
@@ -1504,37 +1504,28 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
             sf_disable();
     }
 }
 
-static int cpumf_pmu_notifier(struct notifier_block *self,
-                  unsigned long action, void *hcpu)
+static int cpusf_pmu_setup(unsigned int cpu, int flags)
 {
-    int flags;
-
     /* Ignore the notification if no events are scheduled on the PMU.
      * This might be racy...
      */
     if (!atomic_read(&num_events))
-        return NOTIFY_OK;
-
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_ONLINE:
-    case CPU_DOWN_FAILED:
-        flags = PMC_INIT;
-        local_irq_disable();
-        setup_pmc_cpu(&flags);
-        local_irq_enable();
-        break;
-    case CPU_DOWN_PREPARE:
-        flags = PMC_RELEASE;
-        local_irq_disable();
-        setup_pmc_cpu(&flags);
-        local_irq_enable();
-        break;
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
+        return 0;
+
+    local_irq_disable();
+    setup_pmc_cpu(&flags);
+    local_irq_enable();
+    return 0;
+}
+
+static int s390_pmu_sf_online_cpu(unsigned int cpu)
+{
+    return cpusf_pmu_setup(cpu, PMC_INIT);
+}
+
+static int s390_pmu_sf_offline_cpu(unsigned int cpu)
+{
+    return cpusf_pmu_setup(cpu, PMC_RELEASE);
 }
 
 static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
@@ -1634,7 +1625,9 @@ static int __init init_cpum_sampling_pmu(void)
                     cpumf_measurement_alert);
         goto out;
     }
-    perf_cpu_notifier(cpumf_pmu_notifier);
+
+    cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
+              s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
 out:
     return err;
 }
@@ -352,28 +352,12 @@ static struct pmu pmu = {
     .read        = sh_pmu_read,
 };
 
-static void sh_pmu_setup(int cpu)
+static int sh_pmu_prepare_cpu(unsigned int cpu)
 {
     struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
     memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int
-sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-    unsigned int cpu = (long)hcpu;
-
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_UP_PREPARE:
-        sh_pmu_setup(cpu);
-        break;
-
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
+    return 0;
 }
 
 int register_sh_pmu(struct sh_pmu *_pmu)
@@ -394,6 +378,7 @@ int register_sh_pmu(struct sh_pmu *_pmu)
     WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
     perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-    perf_cpu_notifier(sh_pmu_notifier);
+    cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
+              NULL);
     return 0;
 }
@@ -331,15 +331,9 @@ static void vgetcpu_cpu_init(void *arg)
     write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }
 
-static int
-vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
+static int vgetcpu_online(unsigned int cpu)
 {
-    long cpu = (long)arg;
-
-    if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-        smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
-
-    return NOTIFY_DONE;
+    return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
 }
 
 static int __init init_vdso(void)
@@ -350,15 +344,9 @@ static int __init init_vdso(void)
     init_vdso_image(&vdso_image_x32);
 #endif
 
-    cpu_notifier_register_begin();
-
-    on_each_cpu(vgetcpu_cpu_init, NULL, 1);
     /* notifier priority > KVM */
-    __hotcpu_notifier(vgetcpu_cpu_notifier, 30);
-
-    cpu_notifier_register_done();
-
-    return 0;
+    return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
+                 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
 }
 subsys_initcall(init_vdso);
 #endif /* CONFIG_X86_64 */
@@ -370,13 +370,13 @@ static int amd_pmu_cpu_prepare(int cpu)
     WARN_ON_ONCE(cpuc->amd_nb);
 
     if (!x86_pmu.amd_nb_constraints)
-        return NOTIFY_OK;
+        return 0;
 
     cpuc->amd_nb = amd_alloc_nb(cpu);
     if (!cpuc->amd_nb)
-        return NOTIFY_BAD;
+        return -ENOMEM;
 
-    return NOTIFY_OK;
+    return 0;
 }
 
 static void amd_pmu_cpu_starting(int cpu)
@@ -725,13 +725,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
     return ret;
 }
 
-static __init int perf_event_ibs_init(void)
+static __init void perf_event_ibs_init(void)
 {
     struct attribute **attr = ibs_op_format_attrs;
 
-    if (!ibs_caps)
-        return -ENODEV;    /* ibs not supported by the cpu */
-
     perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
 
     if (ibs_caps & IBS_CAPS_OPCNT) {
@@ -742,13 +739,11 @@ static __init int perf_event_ibs_init(void)
 
     register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
     pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
-
-    return 0;
 }
 
 #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
 
-static __init int perf_event_ibs_init(void) { return 0; }
+static __init void perf_event_ibs_init(void) { }
 
 #endif
 
@@ -925,7 +920,7 @@ static inline int get_ibs_lvt_offset(void)
     return val & IBSCTL_LVT_OFFSET_MASK;
 }
 
-static void setup_APIC_ibs(void *dummy)
+static void setup_APIC_ibs(void)
 {
     int offset;
 
@@ -940,7 +935,7 @@ failed:
            smp_processor_id());
 }
 
-static void clear_APIC_ibs(void *dummy)
+static void clear_APIC_ibs(void)
 {
     int offset;
 
@@ -949,18 +944,24 @@ static void clear_APIC_ibs(void *dummy)
         setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
 }
 
+static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
+{
+    setup_APIC_ibs();
+    return 0;
+}
+
 #ifdef CONFIG_PM
 
 static int perf_ibs_suspend(void)
 {
-    clear_APIC_ibs(NULL);
+    clear_APIC_ibs();
     return 0;
 }
 
 static void perf_ibs_resume(void)
 {
     ibs_eilvt_setup();
-    setup_APIC_ibs(NULL);
+    setup_APIC_ibs();
 }
 
 static struct syscore_ops perf_ibs_syscore_ops = {
@@ -979,27 +980,15 @@ static inline void perf_ibs_pm_init(void) { }
 
 #endif
 
-static int
-perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
 {
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_STARTING:
-        setup_APIC_ibs(NULL);
-        break;
-    case CPU_DYING:
-        clear_APIC_ibs(NULL);
-        break;
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
+    clear_APIC_ibs();
+    return 0;
 }
 
 static __init int amd_ibs_init(void)
 {
     u32 caps;
-    int ret = -EINVAL;
 
     caps = __get_ibs_caps();
     if (!caps)
@@ -1008,22 +997,25 @@ static __init int amd_ibs_init(void)
     ibs_eilvt_setup();
 
     if (!ibs_eilvt_valid())
-        goto out;
+        return -EINVAL;
 
     perf_ibs_pm_init();
-    cpu_notifier_register_begin();
+
     ibs_caps = caps;
     /* make ibs_caps visible to other cpus: */
     smp_mb();
-    smp_call_function(setup_APIC_ibs, NULL, 1);
-    __perf_cpu_notifier(perf_ibs_cpu_notifier);
-    cpu_notifier_register_done();
+    /*
+     * x86_pmu_amd_ibs_starting_cpu will be called from core on
+     * all online cpus.
+     */
+    cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+              "AP_PERF_X86_AMD_IBS_STARTING",
+              x86_pmu_amd_ibs_starting_cpu,
+              x86_pmu_amd_ibs_dying_cpu);
 
-    ret = perf_event_ibs_init();
-out:
-    if (ret)
-        pr_err("Failed to setup IBS, %d\n", ret);
-    return ret;
+    perf_event_ibs_init();
+
+    return 0;
 }
 
 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
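Note what the IBS hunks above drop: the explicit smp_call_function(setup_APIC_ibs, NULL, 1) broadcast that used to initialize the CPUs already online. Because cpuhp_setup_state() (without _nocalls) invokes the startup callback on each online CPU at registration time — the new in-code comment says exactly this — registration and initial bring-up collapse into a single call. A minimal sketch of that idiom, with illustrative names:

    /*
     * Runs foo_starting_cpu() on every CPU already online, then on
     * each CPU as it is subsequently brought up; no manual broadcast.
     */
    cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
              foo_starting_cpu, foo_dying_cpu);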
@@ -228,12 +228,12 @@ static struct pmu pmu_class = {
     .read        = pmu_event_read,
 };
 
-static void power_cpu_exit(int cpu)
+static int power_cpu_exit(unsigned int cpu)
 {
     int target;
 
     if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
-        return;
+        return 0;
 
     /*
      * Find a new CPU on the same compute unit, if was set in cpumask
@@ -245,9 +245,10 @@ static void power_cpu_exit(int cpu)
         cpumask_set_cpu(target, &cpu_mask);
         perf_pmu_migrate_context(&pmu_class, cpu, target);
     }
+    return 0;
 }
 
-static void power_cpu_init(int cpu)
+static int power_cpu_init(unsigned int cpu)
 {
     int target;
 
@@ -255,7 +256,7 @@ static void power_cpu_init(int cpu)
      * 1) If any CPU is set at cpu_mask in the same compute unit, do
      *    nothing.
      * 2) If no CPU is set at cpu_mask in the same compute unit,
-     *    set current STARTING CPU.
+     *    set current ONLINE CPU.
      *
      * Note: if there is a CPU aside of the new one already in the
      * sibling mask, then it is also in cpu_mask.
@@ -263,33 +264,9 @@ static void power_cpu_init(int cpu)
     target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
     if (target >= nr_cpumask_bits)
         cpumask_set_cpu(cpu, &cpu_mask);
+    return 0;
 }
 
-static int
-power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-    unsigned int cpu = (long)hcpu;
-
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_DOWN_FAILED:
-    case CPU_STARTING:
-        power_cpu_init(cpu);
-        break;
-    case CPU_DOWN_PREPARE:
-        power_cpu_exit(cpu);
-        break;
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
-}
-
-static struct notifier_block power_cpu_notifier_nb = {
-    .notifier_call = power_cpu_notifier,
-    .priority = CPU_PRI_PERF,
-};
-
 static const struct x86_cpu_id cpu_match[] = {
     { .vendor = X86_VENDOR_AMD, .family = 0x15 },
     {},
@@ -297,7 +274,7 @@ static const struct x86_cpu_id cpu_match[] = {
 
 static int __init amd_power_pmu_init(void)
 {
-    int cpu, target, ret;
+    int ret;
 
     if (!x86_match_cpu(cpu_match))
         return 0;
@@ -312,38 +289,25 @@ static int __init amd_power_pmu_init(void)
         return -ENODEV;
     }
 
-    cpu_notifier_register_begin();
-
-    /* Choose one online core of each compute unit. */
-    for_each_online_cpu(cpu) {
-        target = cpumask_first(topology_sibling_cpumask(cpu));
-        if (!cpumask_test_cpu(target, &cpu_mask))
-            cpumask_set_cpu(target, &cpu_mask);
-    }
+    cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
+              "AP_PERF_X86_AMD_POWER_ONLINE",
+              power_cpu_init, power_cpu_exit);
 
     ret = perf_pmu_register(&pmu_class, "power", -1);
     if (WARN_ON(ret)) {
         pr_warn("AMD Power PMU registration failed\n");
-        goto out;
+        return ret;
     }
 
-    __register_cpu_notifier(&power_cpu_notifier_nb);
-
     pr_info("AMD Power PMU detected\n");
 
-out:
-    cpu_notifier_register_done();
-
     return ret;
 }
 module_init(amd_power_pmu_init);
 
 static void __exit amd_power_pmu_exit(void)
 {
-    cpu_notifier_register_begin();
-    __unregister_cpu_notifier(&power_cpu_notifier_nb);
-    cpu_notifier_register_done();
-
+    cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
     perf_pmu_unregister(&pmu_class);
 }
 module_exit(amd_power_pmu_exit);
@@ -358,7 +358,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
     return this;
 }
 
-static void amd_uncore_cpu_starting(unsigned int cpu)
+static int amd_uncore_cpu_starting(unsigned int cpu)
 {
     unsigned int eax, ebx, ecx, edx;
     struct amd_uncore *uncore;
@@ -384,6 +384,8 @@ static void amd_uncore_cpu_starting(unsigned int cpu)
         uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
         *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
     }
+
+    return 0;
 }
 
 static void uncore_online(unsigned int cpu,
@@ -398,13 +400,15 @@ static void uncore_online(unsigned int cpu,
         cpumask_set_cpu(cpu, uncore->active_mask);
 }
 
-static void amd_uncore_cpu_online(unsigned int cpu)
+static int amd_uncore_cpu_online(unsigned int cpu)
 {
     if (amd_uncore_nb)
         uncore_online(cpu, amd_uncore_nb);
 
     if (amd_uncore_l2)
         uncore_online(cpu, amd_uncore_l2);
+
+    return 0;
 }
 
 static void uncore_down_prepare(unsigned int cpu,
@@ -433,13 +437,15 @@ static void uncore_down_prepare(unsigned int cpu,
     }
 }
 
-static void amd_uncore_cpu_down_prepare(unsigned int cpu)
+static int amd_uncore_cpu_down_prepare(unsigned int cpu)
 {
     if (amd_uncore_nb)
         uncore_down_prepare(cpu, amd_uncore_nb);
 
     if (amd_uncore_l2)
         uncore_down_prepare(cpu, amd_uncore_l2);
+
+    return 0;
 }
 
 static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
@@ -454,74 +460,19 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
     *per_cpu_ptr(uncores, cpu) = NULL;
 }
 
-static void amd_uncore_cpu_dead(unsigned int cpu)
+static int amd_uncore_cpu_dead(unsigned int cpu)
 {
     if (amd_uncore_nb)
         uncore_dead(cpu, amd_uncore_nb);
 
     if (amd_uncore_l2)
         uncore_dead(cpu, amd_uncore_l2);
-}
-
-static int
-amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
-            void *hcpu)
-{
-    unsigned int cpu = (long)hcpu;
-
-    switch (action & ~CPU_TASKS_FROZEN) {
-    case CPU_UP_PREPARE:
-        if (amd_uncore_cpu_up_prepare(cpu))
-            return notifier_from_errno(-ENOMEM);
-        break;
-
-    case CPU_STARTING:
-        amd_uncore_cpu_starting(cpu);
-        break;
-
-    case CPU_ONLINE:
-        amd_uncore_cpu_online(cpu);
-        break;
-
-    case CPU_DOWN_PREPARE:
-        amd_uncore_cpu_down_prepare(cpu);
-        break;
-
-    case CPU_UP_CANCELED:
-    case CPU_DEAD:
-        amd_uncore_cpu_dead(cpu);
-        break;
-
-    default:
-        break;
-    }
-
-    return NOTIFY_OK;
-}
-
-static struct notifier_block amd_uncore_cpu_notifier_block = {
-    .notifier_call = amd_uncore_cpu_notifier,
-    .priority = CPU_PRI_PERF + 1,
-};
-
-static void __init init_cpu_already_online(void *dummy)
-{
-    unsigned int cpu = smp_processor_id();
-
-    amd_uncore_cpu_starting(cpu);
-    amd_uncore_cpu_online(cpu);
-}
 
-static void cleanup_cpu_online(void *dummy)
-{
-    unsigned int cpu = smp_processor_id();
-
-    amd_uncore_cpu_dead(cpu);
+    return 0;
 }
 
 static int __init amd_uncore_init(void)
 {
-    unsigned int cpu, cpu2;
     int ret = -ENODEV;
 
     if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
@@ -558,38 +509,29 @@ static int __init amd_uncore_init(void)
         ret = 0;
     }
 
-    if (ret)
-        goto fail_nodev;
-
-    cpu_notifier_register_begin();
-
-    /* init cpus already online before registering for hotplug notifier */
-    for_each_online_cpu(cpu) {
-        ret = amd_uncore_cpu_up_prepare(cpu);
-        if (ret)
-            goto fail_online;
-        smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
-    }
-
-    __register_cpu_notifier(&amd_uncore_cpu_notifier_block);
-    cpu_notifier_register_done();
+    /*
+     * Install callbacks. Core will call them for each online cpu.
+     */
+    if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
+                  "PERF_X86_AMD_UNCORE_PREP",
+                  amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
+        goto fail_l2;
 
+    if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+                  "AP_PERF_X86_AMD_UNCORE_STARTING",
+                  amd_uncore_cpu_starting, NULL))
+        goto fail_prep;
+    if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+                  "AP_PERF_X86_AMD_UNCORE_ONLINE",
+                  amd_uncore_cpu_online,
+                  amd_uncore_cpu_down_prepare))
+        goto fail_start;
     return 0;
 
-fail_online:
-    for_each_online_cpu(cpu2) {
-        if (cpu2 == cpu)
-            break;
-        smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1);
-    }
-    cpu_notifier_register_done();
-
-    /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
-    amd_uncore_nb = amd_uncore_l2 = NULL;
-
-    if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
-        perf_pmu_unregister(&amd_l2_pmu);
+fail_start:
+    cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
+fail_prep:
+    cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
 fail_l2:
     if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
         perf_pmu_unregister(&amd_nb_pmu);
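The uncore conversion above, like the x86 core one that follows, installs several cooperating states and therefore needs explicit unwinding: each successfully installed state is removed in reverse order when a later registration fails. A condensed sketch of the idiom (state and function names are illustrative, not from the patches):

    static int __init foo_init(void)
    {
        int err;

        err = cpuhp_setup_state(CPUHP_FOO_PREPARE, "FOO_PREPARE",
                    foo_prepare_cpu, foo_dead_cpu);
        if (err)
            return err;

        err = cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
                    foo_starting_cpu, foo_dying_cpu);
        if (err)
            goto out;
        return 0;
    out:
        cpuhp_remove_state(CPUHP_FOO_PREPARE);
        return err;
    }

cpuhp_remove_state() also runs the teardown callback on each online CPU; cpuhp_remove_state_nocalls() removes the state without doing so.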
@@ -1477,49 +1477,49 @@ NOKPROBE_SYMBOL(perf_event_nmi_handler);
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_prepare_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (long)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-	int i, ret = NOTIFY_OK;
+	int i;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
-			cpuc->kfree_on_online[i] = NULL;
-		if (x86_pmu.cpu_prepare)
-			ret = x86_pmu.cpu_prepare(cpu);
-		break;
-
-	case CPU_STARTING:
-		if (x86_pmu.cpu_starting)
-			x86_pmu.cpu_starting(cpu);
-		break;
-
-	case CPU_ONLINE:
-		for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
-			kfree(cpuc->kfree_on_online[i]);
-			cpuc->kfree_on_online[i] = NULL;
-		}
-		break;
-
-	case CPU_DYING:
-		if (x86_pmu.cpu_dying)
-			x86_pmu.cpu_dying(cpu);
-		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_DEAD:
-		if (x86_pmu.cpu_dead)
-			x86_pmu.cpu_dead(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return ret;
+	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+		cpuc->kfree_on_online[i] = NULL;
+	if (x86_pmu.cpu_prepare)
+		return x86_pmu.cpu_prepare(cpu);
+	return 0;
+}
+
+static int x86_pmu_dead_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_dead)
+		x86_pmu.cpu_dead(cpu);
+	return 0;
+}
+
+static int x86_pmu_online_cpu(unsigned int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	int i;
+
+	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+		kfree(cpuc->kfree_on_online[i]);
+		cpuc->kfree_on_online[i] = NULL;
+	}
+	return 0;
+}
+
+static int x86_pmu_starting_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_starting)
+		x86_pmu.cpu_starting(cpu);
+	return 0;
+}
+
+static int x86_pmu_dying_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_dying)
+		x86_pmu.cpu_dying(cpu);
+	return 0;
 }
 
 static void __init pmu_check_apic(void)
@@ -1787,10 +1787,39 @@ static int __init init_hw_perf_events(void)
 	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
 
-	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(x86_pmu_notifier);
+	/*
+	 * Install callbacks. Core will call them for each online
+	 * cpu.
+	 */
+	err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+				x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
+	if (err)
+		return err;
+
+	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
+				"AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+				x86_pmu_dying_cpu);
+	if (err)
+		goto out;
+
+	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+				x86_pmu_online_cpu, NULL);
+	if (err)
+		goto out1;
+
+	err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+	if (err)
+		goto out2;
 
 	return 0;
+
+out2:
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
+out1:
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
+out:
+	cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
+	return err;
 }
 early_initcall(init_hw_perf_events);
 
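The new error unwinding in init_hw_perf_events() follows the standard idiom for stacked states: remove the already-installed states in reverse order when a later step fails. A condensed sketch of that shape, using two dynamic states and hypothetical bar_* names:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int bar_up(unsigned int cpu)   { return 0; }	/* placeholder */
static int bar_down(unsigned int cpu) { return 0; }	/* placeholder */

static int bar_state_a, bar_state_b;	/* dynamic state ids */

static int __init bar_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bar:a", bar_up, bar_down);
	if (ret < 0)
		return ret;
	bar_state_a = ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bar:b", bar_up, bar_down);
	if (ret < 0)
		goto remove_a;
	bar_state_b = ret;
	return 0;

remove_a:
	/* Tear down in reverse order of installation. */
	cpuhp_remove_state(bar_state_a);
	return ret;
}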
@@ -3109,7 +3109,7 @@ static int intel_pmu_cpu_prepare(int cpu)
 		cpuc->excl_thread_id = 0;
 	}
 
-	return NOTIFY_OK;
+	return 0;
 
 err_constraint_list:
 	kfree(cpuc->constraint_list);
@@ -3120,7 +3120,7 @@ err_shared_regs:
 	cpuc->shared_regs = NULL;
 
 err:
-	return NOTIFY_BAD;
+	return -ENOMEM;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
@@ -1577,7 +1577,7 @@ static inline void cqm_pick_event_reader(int cpu)
 		cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_starting(unsigned int cpu)
+static int intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1588,39 +1588,26 @@ static void intel_cqm_cpu_starting(unsigned int cpu)
 
 	WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
 	WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
+
+	cqm_pick_event_reader(cpu);
+	return 0;
 }
 
-static void intel_cqm_cpu_exit(unsigned int cpu)
+static int intel_cqm_cpu_exit(unsigned int cpu)
 {
 	int target;
 
 	/* Is @cpu the current cqm reader for this package ? */
 	if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
-		return;
+		return 0;
 
 	/* Find another online reader in this package */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
 	if (target < nr_cpu_ids)
 		cpumask_set_cpu(target, &cqm_cpumask);
-}
-
-static int intel_cqm_cpu_notifier(struct notifier_block *nb,
-				  unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		intel_cqm_cpu_exit(cpu);
-		break;
-	case CPU_STARTING:
-		intel_cqm_cpu_starting(cpu);
-		cqm_pick_event_reader(cpu);
-		break;
-	}
-
-	return NOTIFY_OK;
+
+	return 0;
 }
 
 static const struct x86_cpu_id intel_cqm_match[] = {
@@ -1682,7 +1669,7 @@ out:
 static int __init intel_cqm_init(void)
 {
 	char *str = NULL, scale[20];
-	int i, cpu, ret;
+	int cpu, ret;
 
 	if (x86_match_cpu(intel_cqm_match))
 		cqm_enabled = true;
@@ -1705,8 +1692,7 @@ static int __init intel_cqm_init(void)
 	 *
 	 * Also, check that the scales match on all cpus.
 	 */
-	cpu_notifier_register_begin();
+	get_online_cpus();
 
 	for_each_online_cpu(cpu) {
 		struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1743,11 +1729,6 @@ static int __init intel_cqm_init(void)
 	if (ret)
 		goto out;
 
-	for_each_online_cpu(i) {
-		intel_cqm_cpu_starting(i);
-		cqm_pick_event_reader(i);
-	}
-
 	if (mbm_enabled)
 		ret = intel_mbm_init();
 	if (ret && !cqm_enabled)
@@ -1772,12 +1753,18 @@ static int __init intel_cqm_init(void)
 		pr_info("Intel MBM enabled\n");
 
 	/*
-	 * Register the hot cpu notifier once we are sure cqm
+	 * Setup the hot cpu notifier once we are sure cqm
 	 * is enabled to avoid notifier leak.
 	 */
-	__perf_cpu_notifier(intel_cqm_cpu_notifier);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
+			  "AP_PERF_X86_CQM_STARTING",
+			  intel_cqm_cpu_starting, NULL);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
+			  NULL, intel_cqm_cpu_exit);
 
 out:
-	cpu_notifier_register_done();
+	put_online_cpus();
 
 	if (ret) {
 		kfree(str);
 		cqm_cleanup();
@@ -366,7 +366,7 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode)
  * Check if exiting cpu is the designated reader. If so migrate the
  * events when there is a valid target available
  */
-static void cstate_cpu_exit(int cpu)
+static int cstate_cpu_exit(unsigned int cpu)
 {
 	unsigned int target;
 
@@ -391,9 +391,10 @@ static void cstate_cpu_exit(int cpu)
 			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
 		}
 	}
+	return 0;
 }
 
-static void cstate_cpu_init(int cpu)
+static int cstate_cpu_init(unsigned int cpu)
 {
 	unsigned int target;
 
@@ -415,31 +416,10 @@ static void cstate_cpu_init(int cpu)
 			       topology_core_cpumask(cpu));
 	if (has_cstate_pkg && target >= nr_cpu_ids)
 		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
-}
 
-static int cstate_cpu_notifier(struct notifier_block *self,
-			       unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		cstate_cpu_init(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		cstate_cpu_exit(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block cstate_cpu_nb = {
-	.notifier_call	= cstate_cpu_notifier,
-	.priority	= CPU_PRI_PERF + 1,
-};
-
 static struct pmu cstate_core_pmu = {
 	.attr_groups	= core_attr_groups,
 	.name		= "cstate_core",
@@ -600,18 +580,20 @@ static inline void cstate_cleanup(void)
 
 static int __init cstate_init(void)
 {
-	int cpu, err;
+	int err;
 
-	cpu_notifier_register_begin();
-	for_each_online_cpu(cpu)
-		cstate_cpu_init(cpu);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
+			  "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
+			  NULL);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
 
 	if (has_cstate_core) {
 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
 		if (err) {
 			has_cstate_core = false;
 			pr_info("Failed to register cstate core pmu\n");
-			goto out;
+			return err;
 		}
 	}
 
@@ -621,12 +603,10 @@ static int __init cstate_init(void)
 			has_cstate_pkg = false;
 			pr_info("Failed to register cstate pkg pmu\n");
 			cstate_cleanup();
-			goto out;
+			return err;
 		}
 	}
-	__register_cpu_notifier(&cstate_cpu_nb);
-out:
-	cpu_notifier_register_done();
+
 	return err;
 }
 
@@ -652,9 +632,8 @@ module_init(cstate_pmu_init);
 
 static void __exit cstate_pmu_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&cstate_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
 	cstate_cleanup();
-	cpu_notifier_register_done();
 }
 module_exit(cstate_pmu_exit);
@@ -556,14 +556,14 @@ const struct attribute_group *rapl_attr_groups[] = {
 	NULL,
 };
 
-static void rapl_cpu_exit(int cpu)
+static int rapl_cpu_offline(unsigned int cpu)
 {
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
 	/* Check if exiting cpu is used for collecting rapl events */
 	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
-		return;
+		return 0;
 
 	pmu->cpu = -1;
 	/* Find a new cpu to collect rapl events */
@@ -575,9 +575,10 @@ static void rapl_cpu_exit(int cpu)
 		pmu->cpu = target;
 		perf_pmu_migrate_context(pmu->pmu, cpu, target);
 	}
+	return 0;
 }
 
-static void rapl_cpu_init(int cpu)
+static int rapl_cpu_online(unsigned int cpu)
 {
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
@@ -588,13 +589,14 @@ static void rapl_cpu_init(int cpu)
 	 */
 	target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
 	if (target < nr_cpu_ids)
-		return;
+		return 0;
 
 	cpumask_set_cpu(cpu, &rapl_cpu_mask);
 	pmu->cpu = cpu;
+	return 0;
 }
 
-static int rapl_cpu_prepare(int cpu)
+static int rapl_cpu_prepare(unsigned int cpu)
 {
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 
@@ -615,33 +617,6 @@ static int rapl_cpu_prepare(int cpu)
 	return 0;
 }
 
-static int rapl_cpu_notifier(struct notifier_block *self,
-			     unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		rapl_cpu_prepare(cpu);
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		rapl_cpu_init(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		rapl_cpu_exit(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block rapl_cpu_nb = {
-	.notifier_call	= rapl_cpu_notifier,
-	.priority	= CPU_PRI_PERF + 1,
-};
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -692,24 +667,6 @@ static void __init rapl_advertise(void)
 	}
 }
 
-static int __init rapl_prepare_cpus(void)
-{
-	unsigned int cpu, pkg;
-	int ret;
-
-	for_each_online_cpu(cpu) {
-		pkg = topology_logical_package_id(cpu);
-		if (rapl_pmus->pmus[pkg])
-			continue;
-
-		ret = rapl_cpu_prepare(cpu);
-		if (ret)
-			return ret;
-		rapl_cpu_init(cpu);
-	}
-	return 0;
-}
-
 static void cleanup_rapl_pmus(void)
 {
 	int i;
@@ -837,35 +794,44 @@ static int __init rapl_pmu_init(void)
 	if (ret)
 		return ret;
 
-	cpu_notifier_register_begin();
+	/*
+	 * Install callbacks. Core will call them for each online cpu.
+	 */
 
-	ret = rapl_prepare_cpus();
+	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+				rapl_cpu_prepare, NULL);
 	if (ret)
 		goto out;
 
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
+				"AP_PERF_X86_RAPL_ONLINE",
+				rapl_cpu_online, rapl_cpu_offline);
+	if (ret)
+		goto out1;
+
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out;
+		goto out2;
 
-	__register_cpu_notifier(&rapl_cpu_nb);
-	cpu_notifier_register_done();
 	rapl_advertise();
 	return 0;
 
+out2:
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+out1:
+	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
-	cpu_notifier_register_done();
 	return ret;
 }
 module_init(rapl_pmu_init);
 
 static void __exit intel_rapl_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&rapl_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
-	cpu_notifier_register_done();
 }
 module_exit(intel_rapl_exit);
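Note the cpuhp_remove_state_nocalls() calls in the module-exit paths here and in the following drivers: the _nocalls variant unregisters a state without invoking its teardown callback on the still-online CPUs, which is what you want when the driver frees everything itself right afterwards (as intel_rapl_exit() does via perf_pmu_unregister() and cleanup_rapl_pmus()). A sketch of the two teardown flavours, with a hypothetical baz_state:

static enum cpuhp_state baz_state;	/* illustrative, set at init time */

static void __exit baz_exit(void)
{
	/*
	 * cpuhp_remove_state() would run the teardown callback for every
	 * online CPU; the _nocalls variant only unregisters the state and
	 * leaves cleanup to the driver's own exit code below.
	 */
	cpuhp_remove_state_nocalls(baz_state);
	/* ... driver-specific cleanup here ... */
}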
@@ -1052,7 +1052,7 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static void uncore_cpu_dying(int cpu)
+static int uncore_cpu_dying(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
@@ -1069,16 +1069,19 @@ static void uncore_cpu_dying(int cpu)
 				uncore_box_exit(box);
 		}
 	}
+	return 0;
 }
 
-static void uncore_cpu_starting(int cpu, bool init)
+static int first_init;
+
+static int uncore_cpu_starting(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
 	int i, pkg, ncpus = 1;
 
-	if (init) {
+	if (first_init) {
 		/*
 		 * On init we get the number of online cpus in the package
 		 * and set refcount for all of them.
@@ -1099,9 +1102,11 @@ static void uncore_cpu_starting(int cpu, bool init)
 				uncore_box_init(box);
 		}
 	}
+
+	return 0;
 }
 
-static int uncore_cpu_prepare(int cpu)
+static int uncore_cpu_prepare(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
@@ -1164,13 +1169,13 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
 }
 
-static void uncore_event_exit_cpu(int cpu)
+static int uncore_event_cpu_offline(unsigned int cpu)
 {
 	int target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return;
+		return 0;
 
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1183,9 +1188,10 @@ static void uncore_event_exit_cpu(int cpu)
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+	return 0;
 }
 
-static void uncore_event_init_cpu(int cpu)
+static int uncore_event_cpu_online(unsigned int cpu)
 {
 	int target;
 
@@ -1195,50 +1201,15 @@ static void uncore_event_init_cpu(int cpu)
 	 */
 	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
 	if (target < nr_cpu_ids)
-		return;
+		return 0;
 
 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
 
 	uncore_change_context(uncore_msr_uncores, -1, cpu);
 	uncore_change_context(uncore_pci_uncores, -1, cpu);
+	return 0;
 }
 
-static int uncore_cpu_notifier(struct notifier_block *self,
-			       unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		return notifier_from_errno(uncore_cpu_prepare(cpu));
-
-	case CPU_STARTING:
-		uncore_cpu_starting(cpu, false);
-	case CPU_DOWN_FAILED:
-		uncore_event_init_cpu(cpu);
-		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_DYING:
-		uncore_cpu_dying(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		uncore_event_exit_cpu(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block uncore_cpu_nb = {
-	.notifier_call	= uncore_cpu_notifier,
-	/*
-	 * to migrate uncore events, our notifier should be executed
-	 * before perf core's notifier.
-	 */
-	.priority	= CPU_PRI_PERF + 1,
-};
-
 static int __init type_pmu_register(struct intel_uncore_type *type)
 {
 	int i, ret;
@@ -1282,41 +1253,6 @@ err:
 	return ret;
 }
 
-static void __init uncore_cpu_setup(void *dummy)
-{
-	uncore_cpu_starting(smp_processor_id(), true);
-}
-
-/* Lazy to avoid allocation of a few bytes for the normal case */
-static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
-
-static int __init uncore_cpumask_init(bool msr)
-{
-	unsigned int cpu;
-
-	for_each_online_cpu(cpu) {
-		unsigned int pkg = topology_logical_package_id(cpu);
-		int ret;
-
-		if (test_and_set_bit(pkg, packages))
-			continue;
-		/*
-		 * The first online cpu of each package allocates and takes
-		 * the refcounts for all other online cpus in that package.
-		 * If msrs are not enabled no allocation is required.
-		 */
-		if (msr) {
-			ret = uncore_cpu_prepare(cpu);
-			if (ret)
-				return ret;
-		}
-		uncore_event_init_cpu(cpu);
-		smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
-	}
-	__register_cpu_notifier(&uncore_cpu_nb);
-	return 0;
-}
-
 #define X86_UNCORE_MODEL_MATCH(model, init)	\
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
 
@@ -1440,11 +1376,33 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	cpu_notifier_register_begin();
-	ret = uncore_cpumask_init(!cret);
-	if (ret)
-		goto err;
-	cpu_notifier_register_done();
+	/*
+	 * Install callbacks. Core will call them for each online cpu.
+	 *
+	 * The first online cpu of each package allocates and takes
+	 * the refcounts for all other online cpus in that package.
+	 * If msrs are not enabled no allocation is required and
+	 * uncore_cpu_prepare() is not called for each online cpu.
+	 */
+	if (!cret) {
+		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
+					"PERF_X86_UNCORE_PREP",
+					uncore_cpu_prepare, NULL);
+		if (ret)
+			goto err;
+	} else {
+		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
+					  "PERF_X86_UNCORE_PREP",
+					  uncore_cpu_prepare, NULL);
+	}
+	first_init = 1;
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+			  "AP_PERF_X86_UNCORE_STARTING",
+			  uncore_cpu_starting, uncore_cpu_dying);
+	first_init = 0;
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+			  "AP_PERF_X86_UNCORE_ONLINE",
+			  uncore_event_cpu_online, uncore_event_cpu_offline);
 	return 0;
 
 err:
@@ -1452,17 +1410,16 @@ err:
 	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
-	cpu_notifier_register_done();
 	return ret;
 }
 module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&uncore_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
+	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
-	cpu_notifier_register_done();
 }
 module_exit(intel_uncore_exit);
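The first_init flag introduced above is a consequence of the new semantics: cpuhp_setup_state() immediately runs the startup callback for every CPU that is already online, so the callback needs a way to tell that boot-time sweep apart from a genuine hotplug event. Roughly, with illustrative widget_* naming:

static int first_init;	/* set only around the initial cpuhp_setup_state() */

static int widget_starting_cpu(unsigned int cpu)
{
	if (first_init) {
		/* initial sweep: account for all already-online siblings */
	} else {
		/* real hotplug: exactly one new CPU is coming up */
	}
	return 0;
}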
@@ -215,26 +215,18 @@ void apbt_setup_secondary_clock(void)
  * cpu timers during the offline process due to the ordering of notification.
  * the extra interrupt is harmless.
  */
-static int apbt_cpuhp_notify(struct notifier_block *n,
-			     unsigned long action, void *hcpu)
+static int apbt_cpu_dead(unsigned int cpu)
 {
-	unsigned long cpu = (unsigned long)hcpu;
 	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DEAD:
-		dw_apb_clockevent_pause(adev->timer);
-		if (system_state == SYSTEM_RUNNING) {
-			pr_debug("skipping APBT CPU %lu offline\n", cpu);
-		} else {
-			pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
-			dw_apb_clockevent_stop(adev->timer);
-		}
-		break;
-	default:
-		pr_debug("APBT notified %lu, no action\n", action);
+	dw_apb_clockevent_pause(adev->timer);
+	if (system_state == SYSTEM_RUNNING) {
+		pr_debug("skipping APBT CPU %u offline\n", cpu);
+	} else {
+		pr_debug("APBT clockevent for cpu %u offline\n", cpu);
+		dw_apb_clockevent_stop(adev->timer);
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
 static __init int apbt_late_init(void)
@@ -242,9 +234,8 @@ static __init int apbt_late_init(void)
 	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
 		!apb_timer_block_enabled)
 		return 0;
-	/* This notifier should be called after workqueue is ready */
-	hotcpu_notifier(apbt_cpuhp_notify, -20);
-	return 0;
+	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
+				 apbt_cpu_dead);
 }
 fs_initcall(apbt_late_init);
 #else
@@ -152,30 +152,26 @@ static void init_x2apic_ldr(void)
 	}
 }
 
 /*
  * At CPU state changes, update the x2apic cluster sibling info.
  */
-static int
-update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int x2apic_prepare_cpu(unsigned int cpu)
 {
-	unsigned int this_cpu = (unsigned long)hcpu;
-	unsigned int cpu;
-	int err = 0;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
-					GFP_KERNEL)) {
-			err = -ENOMEM;
-		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
-					       GFP_KERNEL)) {
-			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
-			err = -ENOMEM;
-		}
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
+	if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
+		free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int x2apic_dead_cpu(unsigned int this_cpu)
+{
+	int cpu;
+
 	for_each_online_cpu(cpu) {
 		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
 			continue;
@@ -184,36 +180,20 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	}
 	free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
 	free_cpumask_var(per_cpu(ipi_mask, this_cpu));
-		break;
-	}
-
-	return notifier_from_errno(err);
-}
-
-static struct notifier_block x2apic_cpu_notifier = {
-	.notifier_call = update_clusterinfo,
-};
-
-static int x2apic_init_cpu_notifier(void)
-{
-	int cpu = smp_processor_id();
-
-	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
-	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);
-
-	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
-
-	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
-	register_hotcpu_notifier(&x2apic_cpu_notifier);
-	return 1;
+	return 0;
 }
 
 static int x2apic_cluster_probe(void)
 {
-	if (x2apic_mode)
-		return x2apic_init_cpu_notifier();
-	else
+	int cpu = smp_processor_id();
+
+	if (!x2apic_mode)
 		return 0;
+
+	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
+	cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+			  x2apic_prepare_cpu, x2apic_dead_cpu);
+	return 1;
 }
 
 static const struct cpumask *x2apic_cluster_target_cpus(void)
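The choice of a PREPARE state here is deliberate: PREPARE callbacks such as x2apic_prepare_cpu() run in process context on a control CPU before the incoming CPU executes, so they may sleep and allocate with GFP_KERNEL, and a failure there cleanly aborts the bring-up; the AP "STARTING"/"DYING" states used elsewhere in this series instead run on the hotplugged CPU itself with interrupts disabled and must not sleep. A minimal sketch of the PREPARE-side contract, with hypothetical qux_* names:

#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/slab.h>

static DEFINE_PER_CPU(void *, qux_buf);	/* illustrative per-cpu buffer */

static int qux_prepare_cpu(unsigned int cpu)
{
	/* Process context: sleeping allocations are fine here. */
	void *p = kzalloc(4096, GFP_KERNEL);

	if (!p)
		return -ENOMEM;	/* core aborts the bring-up and rolls back */
	per_cpu(qux_buf, cpu) = p;
	return 0;
}

static int qux_dead_cpu(unsigned int cpu)
{
	kfree(per_cpu(qux_buf, cpu));
	per_cpu(qux_buf, cpu) = NULL;
	return 0;
}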
@@ -710,31 +710,29 @@ static void hpet_work(struct work_struct *w)
 	complete(&hpet_work->complete);
 }
 
-static int hpet_cpuhp_notify(struct notifier_block *n,
-		unsigned long action, void *hcpu)
+static int hpet_cpuhp_online(unsigned int cpu)
 {
-	unsigned long cpu = (unsigned long)hcpu;
 	struct hpet_work_struct work;
-	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
-		init_completion(&work.complete);
-		/* FIXME: add schedule_work_on() */
-		schedule_delayed_work_on(cpu, &work.work, 0);
-		wait_for_completion(&work.complete);
-		destroy_delayed_work_on_stack(&work.work);
-		break;
-	case CPU_DEAD:
-		if (hdev) {
-			free_irq(hdev->irq, hdev);
-			hdev->flags &= ~HPET_DEV_USED;
-			per_cpu(cpu_hpet_dev, cpu) = NULL;
-		}
-		break;
-	}
-	return NOTIFY_OK;
+	INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
+	init_completion(&work.complete);
+	/* FIXME: add schedule_work_on() */
+	schedule_delayed_work_on(cpu, &work.work, 0);
+	wait_for_completion(&work.complete);
+	destroy_delayed_work_on_stack(&work.work);
+	return 0;
+}
+
+static int hpet_cpuhp_dead(unsigned int cpu)
+{
+	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
+
+	if (!hdev)
+		return 0;
+	free_irq(hdev->irq, hdev);
+	hdev->flags &= ~HPET_DEV_USED;
+	per_cpu(cpu_hpet_dev, cpu) = NULL;
+	return 0;
 }
 #else
 
@@ -750,11 +748,8 @@ static void hpet_reserve_msi_timers(struct hpet_data *hd)
 }
 #endif
 
-static int hpet_cpuhp_notify(struct notifier_block *n,
-		unsigned long action, void *hcpu)
-{
-	return NOTIFY_OK;
-}
+#define hpet_cpuhp_online	NULL
+#define hpet_cpuhp_dead		NULL
 
 #endif
 
@@ -931,7 +926,7 @@ out_nohpet:
  */
 static __init int hpet_late_init(void)
 {
-	int cpu;
+	int ret;
 
 	if (boot_hpet_disable)
 		return -ENODEV;
@@ -961,16 +956,20 @@ static __init int hpet_late_init(void)
 	if (boot_cpu_has(X86_FEATURE_ARAT))
 		return 0;
 
-	cpu_notifier_register_begin();
-	for_each_online_cpu(cpu) {
-		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
-	}
-
 	/* This notifier should be called after workqueue is ready */
-	__hotcpu_notifier(hpet_cpuhp_notify, -20);
-	cpu_notifier_register_done();
+	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
+				hpet_cpuhp_online, NULL);
+	if (ret)
+		return ret;
+	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
+				hpet_cpuhp_dead);
+	if (ret)
+		goto err_cpuhp;
 	return 0;
 
+err_cpuhp:
+	cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
+	return ret;
 }
 fs_initcall(hpet_late_init);
 
@@ -323,25 +323,16 @@ static int tboot_wait_for_aps(int num_aps)
 	return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
 }
 
-static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			void *hcpu)
+static int tboot_dying_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_DYING:
-		atomic_inc(&ap_wfs_count);
-		if (num_online_cpus() == 1)
-			if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
-				return NOTIFY_BAD;
-		break;
+	atomic_inc(&ap_wfs_count);
+	if (num_online_cpus() == 1) {
+		if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
+			return -EBUSY;
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block tboot_cpu_notifier =
-{
-	.notifier_call = tboot_cpu_callback,
-};
-
 #ifdef CONFIG_DEBUG_FS
 
 #define TBOOT_LOG_UUID	{ 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \
@@ -417,8 +408,8 @@ static __init int tboot_late_init(void)
 	tboot_create_trampoline();
 
 	atomic_set(&ap_wfs_count, 0);
-	register_hotcpu_notifier(&tboot_cpu_notifier);
-
+	cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
+			  tboot_dying_cpu);
 #ifdef CONFIG_DEBUG_FS
 	debugfs_create_file("tboot_log", S_IRUSR,
 			arch_debugfs_dir, NULL, &tboot_log_fops);
@@ -5552,9 +5552,10 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
-static void tsc_bad(void *info)
+static int kvmclock_cpu_down_prep(unsigned int cpu)
 {
 	__this_cpu_write(cpu_tsc_khz, 0);
+	return 0;
 }
 
 static void tsc_khz_changed(void *data)
@@ -5659,35 +5660,18 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
 	.notifier_call  = kvmclock_cpufreq_notifier
 };
 
-static int kvmclock_cpu_notifier(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int kvmclock_cpu_online(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
-		break;
-	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, tsc_bad, NULL, 1);
-		break;
-	}
-	return NOTIFY_OK;
+	tsc_khz_changed(NULL);
+	return 0;
 }
 
-static struct notifier_block kvmclock_cpu_notifier_block = {
-	.notifier_call  = kvmclock_cpu_notifier,
-	.priority = -INT_MAX
-};
-
 static void kvm_timer_init(void)
 {
 	int cpu;
 
 	max_tsc_khz = tsc_khz;
 
-	cpu_notifier_register_begin();
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
 		struct cpufreq_policy policy;
@@ -5702,12 +5686,9 @@ static void kvm_timer_init(void)
 					  CPUFREQ_TRANSITION_NOTIFIER);
 	}
 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
-
-	__register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
-	cpu_notifier_register_done();
 
+	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
+			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
@@ -5896,7 +5877,7 @@ void kvm_arch_exit(void)
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
 					    CPUFREQ_TRANSITION_NOTIFIER);
-	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
 #ifdef CONFIG_X86_64
 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
 #endif
@@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = {
 	.read = xtensa_pmu_read,
 };
 
-static void xtensa_pmu_setup(void)
+static int xtensa_pmu_setup(int cpu)
 {
 	unsigned i;
 
@@ -413,21 +413,7 @@ static void xtensa_pmu_setup(void)
 		set_er(0, XTENSA_PMU_PMCTRL(i));
 		set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
 	}
-}
-
-static int xtensa_pmu_notifier(struct notifier_block *self,
-			       unsigned long action, void *data)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		xtensa_pmu_setup();
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 static int __init xtensa_pmu_init(void)
@@ -435,7 +421,13 @@ static int __init xtensa_pmu_init(void)
 	int ret;
 	int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
 
-	perf_cpu_notifier(xtensa_pmu_notifier);
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
+				"AP_PERF_XTENSA_STARTING", xtensa_pmu_setup,
+				NULL);
+	if (ret) {
+		pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
+		return ret;
+	}
 #if XTENSA_FAKE_NMI
 	enable_irq(irq);
 #else
@@ -118,12 +118,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
 	struct acpi_device *device;
 	action &= ~CPU_TASKS_FROZEN;
 
-	/*
-	 * CPU_STARTING and CPU_DYING must not sleep. Return here since
-	 * acpi_bus_get_device() may sleep.
-	 */
-	if (action == CPU_STARTING || action == CPU_DYING)
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		break;
+	default:
 		return NOTIFY_DONE;
+	}
 
 	if (!pr || acpi_bus_get_device(pr->handle, &device))
 		return NOTIFY_DONE;
@@ -144,12 +144,15 @@ struct cci_pmu {
 	int num_cntrs;
 	atomic_t active_events;
 	struct mutex reserve_mutex;
-	struct notifier_block cpu_nb;
+	struct list_head entry;
 	cpumask_t cpus;
 };
 
 #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
+static DEFINE_MUTEX(cci_pmu_mutex);
+static LIST_HEAD(cci_pmu_list);
+
 enum cci_models {
 #ifdef CONFIG_ARM_CCI400_PMU
 	CCI400_R0,
@@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
 }
 
-static int cci_pmu_cpu_notifier(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+static int cci_pmu_offline_cpu(unsigned int cpu)
 {
-	struct cci_pmu *cci_pmu = container_of(self,
-					struct cci_pmu, cpu_nb);
-	unsigned int cpu = (long)hcpu;
+	struct cci_pmu *cci_pmu;
 	unsigned int target;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
+	mutex_lock(&cci_pmu_mutex);
+	list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
 		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-			break;
+			continue;
 		target = cpumask_any_but(cpu_online_mask, cpu);
-		if (target >= nr_cpu_ids) // UP, last CPU
-			break;
+		if (target >= nr_cpu_ids)
+			continue;
 		/*
 		 * TODO: migrate context once core races on event->ctx have
 		 * been fixed.
 		 */
 		cpumask_set_cpu(target, &cci_pmu->cpus);
-	default:
-		break;
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&cci_pmu_mutex);
+	return 0;
 }
 
 static struct cci_pmu_model cci_pmu_models[] = {
@@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	atomic_set(&cci_pmu->active_events, 0);
 	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
 
-	cci_pmu->cpu_nb = (struct notifier_block) {
-		.notifier_call	= cci_pmu_cpu_notifier,
-		/*
-		 * to migrate uncore events, our notifier should be executed
-		 * before perf core's notifier.
-		 */
-		.priority	= CPU_PRI_PERF + 1,
-	};
-
-	ret = register_cpu_notifier(&cci_pmu->cpu_nb);
+	ret = cci_pmu_init(cci_pmu, pdev);
 	if (ret)
 		return ret;
 
-	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret) {
-		unregister_cpu_notifier(&cci_pmu->cpu_nb);
-		return ret;
-	}
+	mutex_lock(&cci_pmu_mutex);
+	list_add(&cci_pmu->entry, &cci_pmu_list);
+	mutex_unlock(&cci_pmu_mutex);
 
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
@@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void)
 {
 	int ret;
 
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					"AP_PERF_ARM_CCI_ONLINE", NULL,
+					cci_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
 	ret = platform_driver_register(&cci_pmu_driver);
 	if (ret)
 		return ret;
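The arm-cci change above (and the arm-ccn one right after it) illustrates how multi-instance drivers cope with the new model: there is one global hotplug state rather than one notifier block per device, so each instance is put on a mutex-protected list that the single offline callback walks. In sketch form, with an illustrative quux_* driver:

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct quux_pmu {			/* hypothetical per-device instance */
	struct list_head entry;
	cpumask_t cpus;
};

static DEFINE_MUTEX(quux_mutex);
static LIST_HEAD(quux_list);

static int quux_offline_cpu(unsigned int cpu)
{
	struct quux_pmu *pmu;

	mutex_lock(&quux_mutex);
	list_for_each_entry(pmu, &quux_list, entry) {
		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
			continue;
		/* migrate or re-home this instance's events here */
	}
	mutex_unlock(&quux_mutex);
	return 0;
}

Each probe adds its instance to the list under the mutex and remove takes it off again, exactly as the two drivers here do.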
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
 	struct hrtimer hrtimer;
 
 	cpumask_t cpu;
-	struct notifier_block cpu_nb;
+	struct list_head entry;
 
 	struct pmu pmu;
 };
@@ -189,6 +189,8 @@ struct arm_ccn {
 	struct arm_ccn_dt dt;
 };
 
+static DEFINE_MUTEX(arm_ccn_mutex);
+static LIST_HEAD(arm_ccn_list);
 
 static int arm_ccn_node_to_xp(int node)
 {
@@ -1171,30 +1173,27 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
 }
 
 
-static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
-				    unsigned long action, void *hcpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
 {
-	struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
-	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
-	unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
+	struct arm_ccn_dt *dt;
 	unsigned int target;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
+	mutex_lock(&arm_ccn_mutex);
+	list_for_each_entry(dt, &arm_ccn_list, entry) {
+		struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
+
 		if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
-			break;
+			continue;
 		target = cpumask_any_but(cpu_online_mask, cpu);
 		if (target >= nr_cpu_ids)
-			break;
+			continue;
 		perf_pmu_migrate_context(&dt->pmu, cpu, target);
 		cpumask_set_cpu(target, &dt->cpu);
 		if (ccn->irq)
 			WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
-	default:
-		break;
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&arm_ccn_mutex);
+	return 0;
 }
 
 
@@ -1266,16 +1265,6 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 	/* Pick one CPU which we will use to collect data from CCN... */
 	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
 
-	/*
-	 * ... and change the selection when it goes offline. Priority is
-	 * picked to have a chance to migrate events before perf is notified.
-	 */
-	ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
-	ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1,
-	err = register_cpu_notifier(&ccn->dt.cpu_nb);
-	if (err)
-		goto error_cpu_notifier;
-
 	/* Also make sure that the overflow interrupt is handled by this CPU */
 	if (ccn->irq) {
 		err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
@@ -1289,12 +1278,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 	if (err)
 		goto error_pmu_register;
 
+	mutex_lock(&arm_ccn_mutex);
+	list_add(&ccn->dt.entry, &arm_ccn_list);
+	mutex_unlock(&arm_ccn_mutex);
 	return 0;
 
 error_pmu_register:
 error_set_affinity:
-	unregister_cpu_notifier(&ccn->dt.cpu_nb);
-error_cpu_notifier:
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1306,9 +1296,12 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 {
 	int i;
 
+	mutex_lock(&arm_ccn_mutex);
+	list_del(&ccn->dt.entry);
+	mutex_unlock(&arm_ccn_mutex);
+
 	if (ccn->irq)
 		irq_set_affinity_hint(ccn->irq, NULL);
-	unregister_cpu_notifier(&ccn->dt.cpu_nb);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
 	writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1316,7 +1309,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 }
 
-
 static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
 				int (*callback)(struct arm_ccn *ccn, int region,
 					void __iomem *base, u32 type, u32 id))
@@ -1533,7 +1525,13 @@ static struct platform_driver arm_ccn_driver = {
 
 static int __init arm_ccn_init(void)
 {
-	int i;
+	int i, ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+					"AP_PERF_ARM_CCN_ONLINE", NULL,
+					arm_ccn_pmu_offline_cpu);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
 		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
@@ -1543,6 +1541,7 @@ static int __init arm_ccn_init(void)
 
 static void __exit arm_ccn_exit(void)
 {
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 	platform_driver_unregister(&arm_ccn_driver);
 }
 
@@ -370,8 +370,10 @@ static bool arch_timer_has_nonsecure_ppi(void)
 		arch_timer_ppi[PHYS_NONSECURE_PPI]);
 }
 
-static int arch_timer_setup(struct clock_event_device *clk)
+static int arch_timer_starting_cpu(unsigned int cpu)
 {
+	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+
 	__arch_timer_setup(ARCH_CP15_TIMER, clk);
 
 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
@@ -527,29 +529,14 @@ static void arch_timer_stop(struct clock_event_device *clk)
 	clk->set_state_shutdown(clk);
 }
 
-static int arch_timer_cpu_notify(struct notifier_block *self,
-				 unsigned long action, void *hcpu)
+static int arch_timer_dying_cpu(unsigned int cpu)
 {
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
-		break;
-	case CPU_DYING:
-		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
-		break;
-	}
+	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
 
-	return NOTIFY_OK;
+	arch_timer_stop(clk);
+	return 0;
 }
 
-static struct notifier_block arch_timer_cpu_nb = {
-	.notifier_call = arch_timer_cpu_notify,
-};
-
 #ifdef CONFIG_CPU_PM
 static unsigned int saved_cntkctl;
 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
@@ -570,11 +557,21 @@ static int __init arch_timer_cpu_pm_init(void)
 {
 	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
 }
 
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
+}
+
 #else
 static int __init arch_timer_cpu_pm_init(void)
 {
 	return 0;
 }
 
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+}
 #endif
 
 static int __init arch_timer_register(void)
@@ -621,22 +618,23 @@ static int __init arch_timer_register(void)
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&arch_timer_cpu_nb);
-	if (err)
-		goto out_free_irq;
-
 	err = arch_timer_cpu_pm_init();
 	if (err)
 		goto out_unreg_notify;
 
-	/* Immediately configure the timer on the boot CPU */
-	arch_timer_setup(this_cpu_ptr(arch_timer_evt));
-
+	/* Register and immediately configure the timer on the boot CPU */
+	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+				"AP_ARM_ARCH_TIMER_STARTING",
+				arch_timer_starting_cpu, arch_timer_dying_cpu);
+	if (err)
+		goto out_unreg_cpupm;
 	return 0;
 
+out_unreg_cpupm:
+	arch_timer_cpu_pm_deinit();
+
 out_unreg_notify:
-	unregister_cpu_notifier(&arch_timer_cpu_nb);
-out_free_irq:
 	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
 	if (arch_timer_has_nonsecure_ppi())
 		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
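The hunk above is the template for most conversions in this series: the open-coded notifier switch becomes a starting/dying callback pair attached to a reserved entry in enum cpuhp_state. A minimal sketch of that shape, with hypothetical mydrv_* names standing in for a real driver (nothing below is code from the patch itself):

#include <linux/cpuhotplug.h>

static int mydrv_starting_cpu(unsigned int cpu)
{
	/* Runs on the incoming CPU early during bring-up; set up per-CPU state. */
	return 0;
}

static int mydrv_dying_cpu(unsigned int cpu)
{
	/* Runs on the outgoing CPU before it is torn down; undo the setup. */
	return 0;
}

static int __init mydrv_init(void)
{
	/*
	 * Installs both callbacks and also invokes the startup callback on
	 * every CPU that is already online, which is why the explicit
	 * "configure the timer on the boot CPU" calls disappear. The state
	 * constant here stands in for whatever entry the driver reserved.
	 */
	return cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				 "AP_ARM_ARCH_TIMER_STARTING",
				 mydrv_starting_cpu, mydrv_dying_cpu);
}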
@@ -165,9 +165,9 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int gt_clockevents_init(struct clock_event_device *clk)
+static int gt_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *clk = this_cpu_ptr(gt_evt);
 
 	clk->name = "arm_global_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -186,10 +186,13 @@ static int gt_clockevents_init(struct clock_event_device *clk)
 	return 0;
 }
 
-static void gt_clockevents_stop(struct clock_event_device *clk)
+static int gt_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *clk = this_cpu_ptr(gt_evt);
+
 	gt_clockevent_shutdown(clk);
 	disable_percpu_irq(clk->irq);
+	return 0;
 }
 
 static cycle_t gt_clocksource_read(struct clocksource *cs)
@@ -252,24 +255,6 @@ static int __init gt_clocksource_init(void)
 	return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
 }
 
-static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
-			 void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		gt_clockevents_init(this_cpu_ptr(gt_evt));
-		break;
-	case CPU_DYING:
-		gt_clockevents_stop(this_cpu_ptr(gt_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-static struct notifier_block gt_cpu_nb = {
-	.notifier_call = gt_cpu_notify,
-};
-
 static int __init global_timer_of_register(struct device_node *np)
 {
 	struct clk *gt_clk;
@@ -325,18 +310,14 @@ static int __init global_timer_of_register(struct device_node *np)
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&gt_cpu_nb);
-	if (err) {
-		pr_warn("global-timer: unable to register cpu notifier.\n");
-		goto out_irq;
-	}
-
-	/* Immediately configure the timer on the boot CPU */
+	/* Register and immediately configure the timer on the boot CPU */
 	err = gt_clocksource_init();
 	if (err)
 		goto out_irq;
 
-	err = gt_clockevents_init(this_cpu_ptr(gt_evt));
+	err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+				"AP_ARM_GLOBAL_TIMER_STARTING",
+				gt_starting_cpu, gt_dying_cpu);
 	if (err)
 		goto out_irq;
 
@@ -16,10 +16,9 @@
 
 static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
 
-static void dummy_timer_setup(void)
+static int dummy_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
-	struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
+	struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
 
 	evt->name = "dummy_timer";
 	evt->features = CLOCK_EVT_FEAT_PERIODIC |
@@ -29,36 +28,13 @@ static void dummy_timer_setup(void)
 	evt->cpumask = cpumask_of(cpu);
 
 	clockevents_register_device(evt);
+	return 0;
 }
 
-static int dummy_timer_cpu_notify(struct notifier_block *self,
-				  unsigned long action, void *hcpu)
-{
-	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
-		dummy_timer_setup();
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block dummy_timer_cpu_nb = {
-	.notifier_call = dummy_timer_cpu_notify,
-};
-
 static int __init dummy_timer_register(void)
 {
-	int err = 0;
-
-	cpu_notifier_register_begin();
-	err = __register_cpu_notifier(&dummy_timer_cpu_nb);
-	if (err)
-		goto out;
-
-	/* We won't get a call on the boot CPU, so register immediately */
-	if (num_possible_cpus() > 1)
-		dummy_timer_setup();
-
-out:
-	cpu_notifier_register_done();
-	return err;
+	return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
+				 "AP_DUMMY_TIMER_STARTING",
+				 dummy_timer_starting_cpu, NULL);
 }
 early_initcall(dummy_timer_register);
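When there is nothing to undo on CPU removal, the teardown argument is simply NULL and registration collapses to a one-liner; the core still invokes the startup callback on the already-online boot CPU, which is what retires the old num_possible_cpus() > 1 special case above. A hedged sketch with a hypothetical callback name:

#include <linux/cpuhotplug.h>

static int mydrv_starting_cpu(unsigned int cpu)
{
	/* Per-CPU clockevent registration would go here. */
	return 0;
}

static int __init mydrv_register(void)
{
	/* NULL teardown: nothing runs when a CPU goes down. */
	return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
				 "AP_DUMMY_TIMER_STARTING",
				 mydrv_starting_cpu, NULL);
}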
@@ -443,10 +443,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
+static int exynos4_mct_starting_cpu(unsigned int cpu)
 {
+	struct mct_clock_event_device *mevt =
+		per_cpu_ptr(&percpu_mct_tick, cpu);
 	struct clock_event_device *evt = &mevt->evt;
-	unsigned int cpu = smp_processor_id();
 
 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
 	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
@@ -480,8 +481,10 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
 	return 0;
 }
 
-static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
+static int exynos4_mct_dying_cpu(unsigned int cpu)
 {
+	struct mct_clock_event_device *mevt =
+		per_cpu_ptr(&percpu_mct_tick, cpu);
 	struct clock_event_device *evt = &mevt->evt;
 
 	evt->set_state_shutdown(evt);
|
|||||||
} else {
|
} else {
|
||||||
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
|
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
|
||||||
}
|
}
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int exynos4_mct_cpu_notify(struct notifier_block *self,
|
|
||||||
unsigned long action, void *hcpu)
|
|
||||||
{
|
|
||||||
struct mct_clock_event_device *mevt;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Grab cpu pointer in each case to avoid spurious
|
|
||||||
* preemptible warnings
|
|
||||||
*/
|
|
||||||
switch (action & ~CPU_TASKS_FROZEN) {
|
|
||||||
case CPU_STARTING:
|
|
||||||
mevt = this_cpu_ptr(&percpu_mct_tick);
|
|
||||||
exynos4_local_timer_setup(mevt);
|
|
||||||
break;
|
|
||||||
case CPU_DYING:
|
|
||||||
mevt = this_cpu_ptr(&percpu_mct_tick);
|
|
||||||
exynos4_local_timer_stop(mevt);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return NOTIFY_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct notifier_block exynos4_mct_cpu_nb = {
|
|
||||||
.notifier_call = exynos4_mct_cpu_notify,
|
|
||||||
};
|
|
||||||
|
|
||||||
static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
|
static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
|
||||||
{
|
{
|
||||||
int err, cpu;
|
int err, cpu;
|
||||||
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
|
|
||||||
struct clk *mct_clk, *tick_clk;
|
struct clk *mct_clk, *tick_clk;
|
||||||
|
|
||||||
tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
|
tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
|
||||||
@ -570,12 +546,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = register_cpu_notifier(&exynos4_mct_cpu_nb);
|
/* Install hotplug callbacks which configure the timer on this CPU */
|
||||||
|
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
|
||||||
|
"AP_EXYNOS4_MCT_TIMER_STARTING",
|
||||||
|
exynos4_mct_starting_cpu,
|
||||||
|
exynos4_mct_dying_cpu);
|
||||||
if (err)
|
if (err)
|
||||||
goto out_irq;
|
goto out_irq;
|
||||||
|
|
||||||
/* Immediately configure the timer on the boot CPU */
|
|
||||||
exynos4_local_timer_setup(mevt);
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
out_irq:
|
out_irq:
|
||||||
|
@@ -90,7 +90,7 @@ unsigned long long sched_clock(void)
 	return ticks << HARDWARE_TO_NS_SHIFT;
 }
 
-static void arch_timer_setup(unsigned int cpu)
+static int arch_timer_starting_cpu(unsigned int cpu)
 {
 	unsigned int txdivtime;
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -132,27 +132,9 @@ static void arch_timer_setup(unsigned int cpu)
 		val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
 		__core_reg_set(TXTIMER, val);
 	}
+	return 0;
 }
 
-static int arch_timer_cpu_notify(struct notifier_block *self,
-				 unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		arch_timer_setup(cpu);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block arch_timer_cpu_nb = {
-	.notifier_call = arch_timer_cpu_notify,
-};
-
 int __init metag_generic_timer_init(void)
 {
 	/*
@@ -170,11 +152,8 @@ int __init metag_generic_timer_init(void)
 
 	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
 
-	/* Configure timer on boot CPU */
-	arch_timer_setup(smp_processor_id());
-
-	/* Hook cpu boot to configure other CPU's timers */
-	register_cpu_notifier(&arch_timer_cpu_nb);
-
-	return 0;
+	/* Hook cpu boot to configure the CPU's timers */
+	return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
+				 "AP_METAG_TIMER_STARTING",
+				 arch_timer_starting_cpu, NULL);
 }
@@ -49,10 +49,9 @@ struct irqaction gic_compare_irqaction = {
 	.name = "timer",
 };
 
-static void gic_clockevent_cpu_init(struct clock_event_device *cd)
+static void gic_clockevent_cpu_init(unsigned int cpu,
+				    struct clock_event_device *cd)
 {
-	unsigned int cpu = smp_processor_id();
-
 	cd->name = "MIPS GIC";
 	cd->features = CLOCK_EVT_FEAT_ONESHOT |
 		       CLOCK_EVT_FEAT_C3STOP;
@@ -79,19 +78,10 @@ static void gic_update_frequency(void *data)
 	clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
 }
 
-static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
-			    void *data)
+static int gic_starting_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-		break;
-	case CPU_DYING:
-		gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
-		break;
-	}
-
-	return NOTIFY_OK;
+	gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
+	return 0;
 }
 
 static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
@@ -105,10 +95,11 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block gic_cpu_nb = {
-	.notifier_call = gic_cpu_notifier,
-};
+static int gic_dying_cpu(unsigned int cpu)
+{
+	gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
+	return 0;
+}
 
 static struct notifier_block gic_clk_nb = {
 	.notifier_call = gic_clk_notifier,
@@ -125,12 +116,9 @@ static int gic_clockevent_init(void)
 	if (ret < 0)
 		return ret;
 
-	ret = register_cpu_notifier(&gic_cpu_nb);
-	if (ret < 0)
-		pr_warn("GIC: Unable to register CPU notifier\n");
-
-	gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-
+	cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+			  "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
+			  gic_dying_cpu);
 	return 0;
 }
 
@@ -105,9 +105,9 @@ static struct clocksource msm_clocksource = {
 static int msm_timer_irq;
 static int msm_timer_has_ppi;
 
-static int msm_local_timer_setup(struct clock_event_device *evt)
+static int msm_local_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
 	int err;
 
 	evt->irq = msm_timer_irq;
@@ -135,35 +135,15 @@ static int msm_local_timer_setup(struct clock_event_device *evt)
 	return 0;
 }
 
-static void msm_local_timer_stop(struct clock_event_device *evt)
+static int msm_local_timer_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
+
 	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
+	return 0;
 }
 
-static int msm_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		msm_local_timer_setup(this_cpu_ptr(msm_evt));
-		break;
-	case CPU_DYING:
-		msm_local_timer_stop(this_cpu_ptr(msm_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block msm_timer_cpu_nb = {
-	.notifier_call = msm_timer_cpu_notify,
-};
-
 static u64 notrace msm_sched_clock_read(void)
 {
 	return msm_clocksource.read(&msm_clocksource);
@@ -200,14 +180,15 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
 	if (res) {
 		pr_err("request_percpu_irq failed\n");
 	} else {
-		res = register_cpu_notifier(&msm_timer_cpu_nb);
+		/* Install and invoke hotplug callbacks */
+		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
+					"AP_QCOM_TIMER_STARTING",
+					msm_local_timer_starting_cpu,
+					msm_local_timer_dying_cpu);
 		if (res) {
			free_percpu_irq(irq, msm_evt);
 			goto err;
 		}
-
-		/* Immediately configure the timer on the boot CPU */
-		msm_local_timer_setup(raw_cpu_ptr(msm_evt));
 	}
 
 err:
@@ -170,10 +170,10 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
 /*
  * Setup the local clock events for a CPU.
  */
-static int armada_370_xp_timer_setup(struct clock_event_device *evt)
+static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
 	u32 clr = 0, set = 0;
-	int cpu = smp_processor_id();
 
 	if (timer25Mhz)
 		set = TIMER0_25MHZ;
@@ -200,35 +200,15 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
 	return 0;
 }
 
-static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
+
 	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
+	return 0;
 }
 
-static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
-					  unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
-		break;
-	case CPU_DYING:
-		armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block armada_370_xp_timer_cpu_nb = {
-	.notifier_call = armada_370_xp_timer_cpu_notify,
-};
-
 static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
 
 static int armada_370_xp_timer_suspend(void)
@@ -322,8 +302,6 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
 		return res;
 	}
 
-	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
-
 	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
 	if (!armada_370_xp_evt)
 		return -ENOMEM;
@@ -341,9 +319,12 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
 		return res;
 	}
 
-	res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
+				"AP_ARMADA_TIMER_STARTING",
+				armada_370_xp_timer_starting_cpu,
+				armada_370_xp_timer_dying_cpu);
 	if (res) {
-		pr_err("Failed to setup timer");
+		pr_err("Failed to setup hotplug state and timer");
 		return res;
 	}
 
@@ -172,9 +172,9 @@ static struct irqaction sirfsoc_timer1_irq = {
 	.handler = sirfsoc_timer_interrupt,
 };
 
-static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
+static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
 	struct irqaction *action;
 
 	if (cpu == 0)
@@ -203,50 +203,27 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 	return 0;
 }
 
-static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
+static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
-
 	sirfsoc_timer_count_disable(1);
 
 	if (cpu == 0)
 		remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
 	else
 		remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+	return 0;
 }
 
-static int sirfsoc_cpu_notify(struct notifier_block *self,
-			      unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
-		break;
-	case CPU_DYING:
-		sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block sirfsoc_cpu_nb = {
-	.notifier_call = sirfsoc_cpu_notify,
-};
-
 static int __init sirfsoc_clockevent_init(void)
 {
 	sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
 	BUG_ON(!sirfsoc_clockevent);
 
-	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
-
-	/* Immediately configure the timer on the boot CPU */
-	return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+	/* Install and invoke hotplug callbacks */
+	return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
				 "AP_MARCO_TIMER_STARTING",
				 sirfsoc_local_timer_starting_cpu,
				 sirfsoc_local_timer_dying_cpu);
 }
 
 /* initialize the kernel jiffy timer source */
@@ -51,6 +51,8 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 static int etm_count;
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
 
+static enum cpuhp_state hp_online;
+
 /*
  * Memory mapped writes to clear os lock are not supported on some processors
  * and OS lock must be unlocked before any memory mapped access on such
@@ -481,8 +483,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
 
 	/*
 	 * Configure the ETM only if the CPU is online. If it isn't online
-	 * hw configuration will take place when 'CPU_STARTING' is received
-	 * in @etm_cpu_callback.
+	 * hw configuration will take place on the local CPU during bring up.
 	 */
 	if (cpu_online(drvdata->cpu)) {
 		ret = smp_call_function_single(drvdata->cpu,
@@ -641,16 +642,21 @@ static const struct coresight_ops etm_cs_ops = {
 	.source_ops = &etm_source_ops,
 };
 
-static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			    void *hcpu)
+static int etm_online_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (!etmdrvdata[cpu])
-		goto out;
+		return 0;
 
-	switch (action & (~CPU_TASKS_FROZEN)) {
-	case CPU_STARTING:
+	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+		coresight_enable(etmdrvdata[cpu]->csdev);
+	return 0;
+}
+
+static int etm_starting_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
 	spin_lock(&etmdrvdata[cpu]->spinlock);
 	if (!etmdrvdata[cpu]->os_unlock) {
 		etm_os_unlock(etmdrvdata[cpu]);
@@ -660,29 +666,21 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
 	if (local_read(&etmdrvdata[cpu]->mode))
 		etm_enable_hw(etmdrvdata[cpu]);
 	spin_unlock(&etmdrvdata[cpu]->spinlock);
-	break;
+	return 0;
+}
 
-	case CPU_ONLINE:
-		if (etmdrvdata[cpu]->boot_enable &&
-		    !etmdrvdata[cpu]->sticky_enable)
-			coresight_enable(etmdrvdata[cpu]->csdev);
-		break;
+static int etm_dying_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
 
-	case CPU_DYING:
 	spin_lock(&etmdrvdata[cpu]->spinlock);
 	if (local_read(&etmdrvdata[cpu]->mode))
 		etm_disable_hw(etmdrvdata[cpu]);
 	spin_unlock(&etmdrvdata[cpu]->spinlock);
-	break;
-	}
-out:
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block etm_cpu_notifier = {
-	.notifier_call = etm_cpu_callback,
-};
-
 static bool etm_arch_supported(u8 arch)
 {
 	switch (arch) {
@@ -806,9 +804,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 			     etm_init_arch_data, drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
 
-	if (!etm_count++)
-		register_hotcpu_notifier(&etm_cpu_notifier);
+	if (!etm_count++) {
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+					  "AP_ARM_CORESIGHT_STARTING",
+					  etm_starting_cpu, etm_dying_cpu);
+		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+						"AP_ARM_CORESIGHT_ONLINE",
+						etm_online_cpu, NULL);
+		if (ret < 0)
+			goto err_arch_supported;
+		hp_online = ret;
+	}
 	put_online_cpus();
 
 	if (etm_arch_supported(drvdata->arch) == false) {
@@ -839,7 +845,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 
 	pm_runtime_put(&adev->dev);
 	dev_info(dev, "%s initialized\n", (char *)id->data);
-
 	if (boot_enable) {
 		coresight_enable(drvdata->csdev);
 		drvdata->boot_enable = true;
@@ -848,8 +853,11 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 
 err_arch_supported:
-	if (--etm_count == 0)
-		unregister_hotcpu_notifier(&etm_cpu_notifier);
+	if (--etm_count == 0) {
+		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+		if (hp_online)
+			cpuhp_remove_state_nocalls(hp_online);
+	}
 	return ret;
 }
 
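The CoreSight conversion needs two hotplug levels: a reserved STARTING entry for the IRQs-off register work, and an ONLINE-level callback for coresight_enable(), which may sleep. The ONLINE slot is allocated dynamically from CPUHP_AP_ONLINE_DYN, so the returned state number must be kept (hp_online above) for later removal. A sketch of the dynamic-slot idiom, with hypothetical mydrv_* names:

#include <linux/cpuhotplug.h>

static enum cpuhp_state mydrv_hp_online;

static int mydrv_online_cpu(unsigned int cpu)
{
	/* ONLINE-level: runs from the hotplug thread once the CPU is up. */
	return 0;
}

static int __init mydrv_setup(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mydrv:online",
					mydrv_online_cpu, NULL);
	if (ret < 0)
		return ret;
	/* For CPUHP_AP_ONLINE_DYN a positive return value is the slot that
	 * was actually allocated; it is needed to remove the state again. */
	mydrv_hp_online = ret;
	return 0;
}

static void mydrv_teardown(void)
{
	cpuhp_remove_state_nocalls(mydrv_hp_online);
}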
@@ -48,6 +48,8 @@ static int etm4_count;
 static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
 static void etm4_set_default(struct etmv4_config *config);
 
+static enum cpuhp_state hp_online;
+
 static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
 {
 	/* Writing any value to ETMOSLAR unlocks the trace registers */
@@ -673,16 +675,21 @@ void etm4_config_trace_mode(struct etmv4_config *config)
 	config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
 }
 
-static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			     void *hcpu)
+static int etm4_online_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (!etmdrvdata[cpu])
-		goto out;
+		return 0;
 
-	switch (action & (~CPU_TASKS_FROZEN)) {
-	case CPU_STARTING:
+	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+		coresight_enable(etmdrvdata[cpu]->csdev);
+	return 0;
+}
+
+static int etm4_starting_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
 	spin_lock(&etmdrvdata[cpu]->spinlock);
 	if (!etmdrvdata[cpu]->os_unlock) {
 		etm4_os_unlock(etmdrvdata[cpu]);
@@ -692,29 +699,21 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
 	if (local_read(&etmdrvdata[cpu]->mode))
 		etm4_enable_hw(etmdrvdata[cpu]);
 	spin_unlock(&etmdrvdata[cpu]->spinlock);
-	break;
+	return 0;
+}
 
-	case CPU_ONLINE:
-		if (etmdrvdata[cpu]->boot_enable &&
-		    !etmdrvdata[cpu]->sticky_enable)
-			coresight_enable(etmdrvdata[cpu]->csdev);
-		break;
+static int etm4_dying_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
 
-	case CPU_DYING:
 	spin_lock(&etmdrvdata[cpu]->spinlock);
 	if (local_read(&etmdrvdata[cpu]->mode))
 		etm4_disable_hw(etmdrvdata[cpu]);
 	spin_unlock(&etmdrvdata[cpu]->spinlock);
-	break;
-	}
-out:
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block etm4_cpu_notifier = {
-	.notifier_call = etm4_cpu_callback,
-};
-
 static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
 {
 	drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
@@ -767,8 +766,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 			      etm4_init_arch_data, drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
 
-	if (!etm4_count++)
-		register_hotcpu_notifier(&etm4_cpu_notifier);
+	if (!etm4_count++) {
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
+					  "AP_ARM_CORESIGHT4_STARTING",
+					  etm4_starting_cpu, etm4_dying_cpu);
+		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+						"AP_ARM_CORESIGHT4_ONLINE",
+						etm4_online_cpu, NULL);
+		if (ret < 0)
+			goto err_arch_supported;
+		hp_online = ret;
+	}
 
 	put_online_cpus();
 
@@ -809,8 +817,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 
 err_arch_supported:
-	if (--etm4_count == 0)
-		unregister_hotcpu_notifier(&etm4_cpu_notifier);
+	if (--etm4_count == 0) {
+		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+		if (hp_online)
+			cpuhp_remove_state_nocalls(hp_online);
+	}
 	return ret;
 }
 
@@ -345,38 +345,20 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
 				ARMADA_370_XP_SW_TRIG_INT_OFFS);
 }
 
-static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
-					 unsigned long action, void *hcpu)
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
-		armada_xp_mpic_perf_init();
-		armada_xp_mpic_smp_cpu_init();
-	}
-
-	return NOTIFY_OK;
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_smp_cpu_init();
+	return 0;
 }
 
-static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
-	.notifier_call = armada_xp_mpic_secondary_init,
-	.priority = 100,
-};
-
-static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
-		armada_xp_mpic_perf_init();
-		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
-	}
-
-	return NOTIFY_OK;
+	armada_xp_mpic_perf_init();
+	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	return 0;
 }
-
-static struct notifier_block mpic_cascaded_cpu_notifier = {
-	.notifier_call = mpic_cascaded_secondary_init,
-	.priority = 100,
-};
-#endif /* CONFIG_SMP */
+#endif
 
 static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
 	.map = armada_370_xp_mpic_irq_map,
@@ -595,11 +577,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 		set_handle_irq(armada_370_xp_handle_irq);
 #ifdef CONFIG_SMP
 		set_smp_cross_call(armada_mpic_send_doorbell);
-		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+					  "AP_IRQ_ARMADA_XP_STARTING",
+					  armada_xp_mpic_starting_cpu, NULL);
 #endif
 	} else {
 #ifdef CONFIG_SMP
-		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+					  "AP_IRQ_ARMADA_CASC_STARTING",
+					  mpic_cascaded_starting_cpu, NULL);
 #endif
 		irq_set_chained_handler(parent_irq,
 					armada_370_xp_mpic_handle_cascade_irq);
@@ -202,26 +202,19 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
 	}
 }
 
-/* Unmasks the IPI on the CPU when it's online. */
-static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
-					  unsigned long action, void *hcpu)
+static int bcm2836_cpu_starting(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-	unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
-	unsigned int mailbox = 0;
-
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
-	else if (action == CPU_DYING)
-		bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
-
-	return NOTIFY_OK;
+	bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					       cpu);
+	return 0;
 }
 
-static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
-	.notifier_call = bcm2836_arm_irqchip_cpu_notify,
-	.priority = 100,
-};
+static int bcm2836_cpu_dying(unsigned int cpu)
+{
+	bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					     cpu);
+	return 0;
+}
 
 #ifdef CONFIG_ARM
 static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
@@ -251,10 +244,9 @@ bcm2836_arm_irqchip_smp_init(void)
 {
 #ifdef CONFIG_SMP
 	/* Unmask IPIs to the boot CPU. */
-	bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
-				       CPU_STARTING,
-				       (void *)(uintptr_t)smp_processor_id());
-	register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+			  "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+			  bcm2836_cpu_dying);
 
 	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
 
@@ -538,22 +538,12 @@ static void gic_cpu_init(void)
 }
 
 #ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb,
-			      unsigned long action, void *hcpu)
-{
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		gic_cpu_init();
-	return NOTIFY_OK;
-}
 
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
-	.notifier_call = gic_secondary_init,
-	.priority = 100,
-};
+static int gic_starting_cpu(unsigned int cpu)
+{
+	gic_cpu_init();
+	return 0;
+}
 
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
@@ -634,7 +624,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 static void gic_smp_init(void)
 {
 	set_smp_cross_call(gic_raise_softirq);
-	register_cpu_notifier(&gic_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
+				  "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
+				  NULL);
 }
 
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
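cpuhp_setup_state_nocalls() installs the callbacks without invoking them on CPUs that are already online; the GIC uses it because gic_cpu_init() has already run by hand on the boot CPU earlier in the init path, so invoking the callback again would be redundant. A sketch of the distinction, with a hypothetical callback name:

#include <linux/cpuhotplug.h>

static int mydrv_starting_cpu(unsigned int cpu)
{
	/* Per-CPU interrupt controller setup for secondary CPUs. */
	return 0;
}

static void __init mydrv_smp_init(void)
{
	/*
	 * _nocalls: only CPUs brought up after this point run the
	 * callback; the boot CPU is assumed to be initialised already.
	 * Plain cpuhp_setup_state() would invoke it here as well.
	 */
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
				  "AP_IRQ_GICV3_STARTING",
				  mydrv_starting_cpu, NULL);
}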
@@ -984,25 +984,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
 	return -EINVAL;
 }
 
-#ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
-			      void *hcpu)
+static int gic_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		gic_cpu_init(&gic_data[0]);
-	return NOTIFY_OK;
+	gic_cpu_init(&gic_data[0]);
+	return 0;
 }
 
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
-	.notifier_call = gic_secondary_init,
-	.priority = 100,
-};
-#endif
-
 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 				unsigned int nr_irqs, void *arg)
 {
@@ -1177,8 +1164,10 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
 		gic_cpu_map[i] = 0xff;
 #ifdef CONFIG_SMP
 	set_smp_cross_call(gic_raise_softirq);
-	register_cpu_notifier(&gic_cpu_notifier);
 #endif
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+				  "AP_IRQ_GIC_STARTING",
+				  gic_starting_cpu, NULL);
 	set_handle_irq(gic_handle_irq);
 	if (static_key_true(&supports_deactivate))
 		pr_info("GIC: Using split EOI/Deactivate mode\n");
@@ -342,26 +342,12 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
 	return ret;
 }
 
-#ifdef CONFIG_SMP
-static int hip04_irq_secondary_init(struct notifier_block *nfb,
-				    unsigned long action,
-				    void *hcpu)
+static int hip04_irq_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		hip04_irq_cpu_init(&hip04_data);
-	return NOTIFY_OK;
+	hip04_irq_cpu_init(&hip04_data);
+	return 0;
 }
 
-/*
- * Notifier for enabling the INTC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block hip04_irq_cpu_notifier = {
-	.notifier_call = hip04_irq_secondary_init,
-	.priority = 100,
-};
-#endif
-
 static const struct irq_domain_ops hip04_irq_domain_ops = {
 	.map = hip04_irq_domain_map,
 	.xlate = hip04_irq_domain_xlate,
@@ -417,13 +403,12 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
 
 #ifdef CONFIG_SMP
 	set_smp_cross_call(hip04_raise_softirq);
-	register_cpu_notifier(&hip04_irq_cpu_notifier);
 #endif
 	set_handle_irq(hip04_handle_irq);
 
 	hip04_irq_dist_init(&hip04_data);
-	hip04_irq_cpu_init(&hip04_data);
+	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+			  hip04_irq_starting_cpu, NULL);
 	return 0;
 }
 IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
@@ -92,29 +92,22 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = {
 	.resume = ledtrig_cpu_syscore_resume,
 };
 
-static int ledtrig_cpu_notify(struct notifier_block *self,
-			      unsigned long action, void *hcpu)
+static int ledtrig_online_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		ledtrig_cpu(CPU_LED_START);
-		break;
-	case CPU_DYING:
-		ledtrig_cpu(CPU_LED_STOP);
-		break;
-	}
-
-	return NOTIFY_OK;
+	ledtrig_cpu(CPU_LED_START);
+	return 0;
 }
 
-static struct notifier_block ledtrig_cpu_nb = {
-	.notifier_call = ledtrig_cpu_notify,
-};
+static int ledtrig_prepare_down_cpu(unsigned int cpu)
+{
+	ledtrig_cpu(CPU_LED_STOP);
+	return 0;
+}
 
 static int __init ledtrig_cpu_init(void)
 {
 	int cpu;
+	int ret;
 
 	/* Supports up to 9999 cpu cores */
 	BUILD_BUG_ON(CONFIG_NR_CPUS > 9999);
@@ -133,7 +126,12 @@ static int __init ledtrig_cpu_init(void)
 	}
 
 	register_syscore_ops(&ledtrig_cpu_syscore_ops);
-	register_cpu_notifier(&ledtrig_cpu_nb);
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+				ledtrig_online_cpu, ledtrig_prepare_down_cpu);
+	if (ret < 0)
+		pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
+		       ret);
 
 	pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
 
@@ -688,30 +688,29 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
+static DEFINE_MUTEX(arm_pmu_mutex);
+static LIST_HEAD(arm_pmu_list);
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
+static int arm_perf_starting_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+	struct arm_pmu *pmu;
 
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
+	mutex_lock(&arm_pmu_mutex);
+	list_for_each_entry(pmu, &arm_pmu_list, entry) {
 
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
-
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
+		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+			continue;
+		if (pmu->reset)
+			pmu->reset(pmu);
+	}
+	mutex_unlock(&arm_pmu_mutex);
+	return 0;
 }
 
 #ifdef CONFIG_CPU_PM
@@ -822,10 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
-	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
-	if (err)
-		goto out_hw_events;
+	mutex_lock(&arm_pmu_mutex);
+	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
+	mutex_unlock(&arm_pmu_mutex);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -861,8 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-out_hw_events:
+	mutex_lock(&arm_pmu_mutex);
+	list_del(&cpu_pmu->entry);
+	mutex_unlock(&arm_pmu_mutex);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -870,7 +869,9 @@ out_hw_events:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	mutex_lock(&arm_pmu_mutex);
+	list_del(&cpu_pmu->entry);
+	mutex_unlock(&arm_pmu_mutex);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -1061,3 +1062,17 @@ out_free:
 	kfree(pmu);
 	return ret;
 }
+
+static int arm_pmu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+					"AP_PERF_ARM_STARTING",
+					arm_perf_starting_cpu, NULL);
+	if (ret)
+		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+		       ret);
+	return ret;
+}
+subsys_initcall(arm_pmu_hp_init);
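A cpuhp state can be installed only once, so the per-PMU hotplug_nb notifier gives way to one global callback that walks a mutex-protected list of registered PMUs; cpu_pmu_init() and cpu_pmu_destroy() now just link and unlink their instance. The same shape works for any multi-instance driver; a sketch with hypothetical types and names:

#include <linux/list.h>
#include <linux/mutex.h>

struct mydev {
	struct list_head entry;
	/* per-instance state */
};

static DEFINE_MUTEX(mydev_mutex);
static LIST_HEAD(mydev_list);

static int mydev_starting_cpu(unsigned int cpu)
{
	struct mydev *dev;

	/* One shared callback services every registered instance. */
	mutex_lock(&mydev_mutex);
	list_for_each_entry(dev, &mydev_list, entry) {
		/* re-initialise dev's per-CPU hardware state for @cpu */
	}
	mutex_unlock(&mydev_mutex);
	return 0;
}

static void mydev_register(struct mydev *dev)
{
	mutex_lock(&mydev_mutex);
	list_add_tail(&dev->entry, &mydev_list);
	mutex_unlock(&mydev_mutex);
}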
@@ -55,17 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
-/*
- * CPU notifier priorities.
- */
-enum {
-	CPU_PRI_PERF = 20,
-
-	/* bring up workqueues before normal notifiers and down after */
-	CPU_PRI_WORKQUEUE_UP = 5,
-	CPU_PRI_WORKQUEUE_DOWN = -5,
-};
-
 #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
 #define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
 #define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
|
@@ -4,19 +4,95 @@
 enum cpuhp_state {
 	CPUHP_OFFLINE,
 	CPUHP_CREATE_THREADS,
+	CPUHP_PERF_PREPARE,
+	CPUHP_PERF_X86_PREPARE,
+	CPUHP_PERF_X86_UNCORE_PREP,
+	CPUHP_PERF_X86_AMD_UNCORE_PREP,
+	CPUHP_PERF_X86_RAPL_PREP,
+	CPUHP_PERF_BFIN,
+	CPUHP_PERF_POWER,
+	CPUHP_PERF_SUPERH,
+	CPUHP_X86_HPET_DEAD,
+	CPUHP_X86_APB_DEAD,
+	CPUHP_WORKQUEUE_PREP,
+	CPUHP_POWER_NUMA_PREPARE,
+	CPUHP_HRTIMERS_PREPARE,
+	CPUHP_PROFILE_PREPARE,
+	CPUHP_X2APIC_PREPARE,
+	CPUHP_SMPCFD_PREPARE,
+	CPUHP_RCUTREE_PREP,
 	CPUHP_NOTIFY_PREPARE,
+	CPUHP_TIMERS_DEAD,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
+	CPUHP_AP_RCUTREE_DYING,
+	CPUHP_AP_IRQ_GIC_STARTING,
+	CPUHP_AP_IRQ_GICV3_STARTING,
+	CPUHP_AP_IRQ_HIP04_STARTING,
+	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+	CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+	CPUHP_AP_IRQ_BCM2836_STARTING,
+	CPUHP_AP_ARM_MVEBU_COHERENCY,
+	CPUHP_AP_PERF_X86_UNCORE_STARTING,
+	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+	CPUHP_AP_PERF_X86_STARTING,
+	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+	CPUHP_AP_PERF_X86_CQM_STARTING,
+	CPUHP_AP_PERF_X86_CSTATE_STARTING,
+	CPUHP_AP_PERF_XTENSA_STARTING,
+	CPUHP_AP_PERF_METAG_STARTING,
+	CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+	CPUHP_AP_ARM_VFP_STARTING,
+	CPUHP_AP_PERF_ARM_STARTING,
+	CPUHP_AP_ARM_L2X0_STARTING,
+	CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+	CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+	CPUHP_AP_DUMMY_TIMER_STARTING,
+	CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+	CPUHP_AP_ARM_TWD_STARTING,
+	CPUHP_AP_METAG_TIMER_STARTING,
+	CPUHP_AP_QCOM_TIMER_STARTING,
+	CPUHP_AP_ARMADA_TIMER_STARTING,
+	CPUHP_AP_MARCO_TIMER_STARTING,
+	CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+	CPUHP_AP_ARC_TIMER_STARTING,
+	CPUHP_AP_KVM_STARTING,
+	CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+	CPUHP_AP_KVM_ARM_VGIC_STARTING,
+	CPUHP_AP_KVM_ARM_TIMER_STARTING,
+	CPUHP_AP_ARM_XEN_STARTING,
+	CPUHP_AP_ARM_CORESIGHT_STARTING,
+	CPUHP_AP_ARM_CORESIGHT4_STARTING,
+	CPUHP_AP_ARM64_ISNDEP_STARTING,
+	CPUHP_AP_SMPCFD_DYING,
+	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_AP_ONLINE_IDLE,
 	CPUHP_AP_SMPBOOT_THREADS,
+	CPUHP_AP_X86_VDSO_VMA_ONLINE,
+	CPUHP_AP_PERF_ONLINE,
+	CPUHP_AP_PERF_X86_ONLINE,
+	CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+	CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+	CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
+	CPUHP_AP_PERF_X86_RAPL_ONLINE,
+	CPUHP_AP_PERF_X86_CQM_ONLINE,
+	CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+	CPUHP_AP_PERF_S390_CF_ONLINE,
+	CPUHP_AP_PERF_S390_SF_ONLINE,
+	CPUHP_AP_PERF_ARM_CCI_ONLINE,
+	CPUHP_AP_PERF_ARM_CCN_ONLINE,
+	CPUHP_AP_WORKQUEUE_ONLINE,
+	CPUHP_AP_RCUTREE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
+	CPUHP_AP_X86_HPET_ONLINE,
+	CPUHP_AP_X86_KVM_CLK_ONLINE,
 	CPUHP_AP_ACTIVE,
 	CPUHP_ONLINE,
 };
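Position in enum cpuhp_state is what replaces notifier priorities: states run in enum order while a CPU comes up and in reverse order while it goes down, and everything between CPUHP_AP_OFFLINE and CPUHP_AP_ONLINE executes on the hotplugged CPU itself. The CPUHP_AP_ONLINE_DYN..CPUHP_AP_ONLINE_DYN_END range reserves 30 slots for callers that do not need a fixed position. A sketch of claiming a reserved slot -- CPUHP_FOO_PREPARE and the foo_* names are hypothetical; a real user adds its own constant to the enum at the position that yields the required ordering:

static int foo_prepare_cpu(unsigned int cpu)
{
	/* Called on the control CPU before the new CPU starts. */
	return 0;
}

static int foo_dead_cpu(unsigned int cpu)
{
	/* Called on the control CPU after the CPU is gone. */
	return 0;
}

static int __init foo_init(void)
{
	return cpuhp_setup_state(CPUHP_FOO_PREPARE, "FOO_PREPARE",
				 foo_prepare_cpu, foo_dead_cpu);
}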
@@ -494,4 +494,11 @@ extern void __init hrtimers_init(void);
 /* Show pending timers: */
 extern void sysrq_timer_list_show(void);
 
+int hrtimers_prepare_cpu(unsigned int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+int hrtimers_dead_cpu(unsigned int cpu);
+#else
+#define hrtimers_dead_cpu	NULL
+#endif
+
 #endif

@@ -109,7 +109,7 @@ struct arm_pmu {
 	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 	struct platform_device	*plat_device;
 	struct pmu_hw_events	__percpu *hw_events;
-	struct notifier_block	hotplug_nb;
+	struct list_head	entry;
 	struct notifier_block	cpu_pm_nb;
 };
 

@@ -1309,41 +1309,6 @@ static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
 
 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
-/*
- * This has to have a higher priority than migration_notifier in sched/core.c.
- */
-#define perf_cpu_notifier(fn)						\
-do {									\
-	static struct notifier_block fn##_nb =				\
-		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
-	unsigned long cpu = smp_processor_id();				\
-	unsigned long flags;						\
-									\
-	cpu_notifier_register_begin();					\
-	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
-		(void *)(unsigned long)cpu);				\
-	local_irq_save(flags);						\
-	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
-		(void *)(unsigned long)cpu);				\
-	local_irq_restore(flags);					\
-	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
-		(void *)(unsigned long)cpu);				\
-	__register_cpu_notifier(&fn##_nb);				\
-	cpu_notifier_register_done();					\
-} while (0)
-
-/*
- * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
- * callback for already online CPUs.
- */
-#define __perf_cpu_notifier(fn)						\
-do {									\
-	static struct notifier_block fn##_nb =				\
-		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
-									\
-	__register_cpu_notifier(&fn##_nb);				\
-} while (0)
-
 struct perf_pmu_events_attr {
 	struct device_attribute attr;
 	u64 id;
@@ -1385,4 +1350,13 @@ _name##_show(struct device *dev, \
 									\
 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
 
+/* Performance counter hotplug functions */
+#ifdef CONFIG_PERF_EVENTS
+int perf_event_init_cpu(unsigned int cpu);
+int perf_event_exit_cpu(unsigned int cpu);
+#else
+#define perf_event_init_cpu	NULL
+#define perf_event_exit_cpu	NULL
+#endif
+
 #endif /* _LINUX_PERF_EVENT_H */

@@ -243,4 +243,11 @@ static inline void rcu_all_qs(void)
 	barrier(); /* Avoid RCU read-side critical sections leaking across. */
 }
 
+/* RCUtree hotplug events */
+#define rcutree_prepare_cpu	NULL
+#define rcutree_online_cpu	NULL
+#define rcutree_offline_cpu	NULL
+#define rcutree_dead_cpu	NULL
+#define rcutree_dying_cpu	NULL
+
 #endif /* __LINUX_RCUTINY_H */

@@ -111,4 +111,11 @@ bool rcu_is_watching(void);
 
 void rcu_all_qs(void);
 
+/* RCUtree hotplug events */
+int rcutree_prepare_cpu(unsigned int cpu);
+int rcutree_online_cpu(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+int rcutree_dead_cpu(unsigned int cpu);
+int rcutree_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_RCUTREE_H */

@@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
 
 void smp_setup_processor_id(void);
 
+/* SMP core functions */
+int smpcfd_prepare_cpu(unsigned int cpu);
+int smpcfd_dead_cpu(unsigned int cpu);
+int smpcfd_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_SMP_H */

@@ -273,4 +273,10 @@ unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
 unsigned long round_jiffies_up(unsigned long j);
 unsigned long round_jiffies_up_relative(unsigned long j);
 
+#ifdef CONFIG_HOTPLUG_CPU
+int timers_dead_cpu(unsigned int cpu);
+#else
+#define timers_dead_cpu NULL
+#endif
+
 #endif

@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu);
 static inline void wq_watchdog_touch(int cpu) { }
 #endif	/* CONFIG_WQ_WATCHDOG */
 
+#ifdef CONFIG_SMP
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+#endif
+
 #endif
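The header changes above share one convention: each subsystem exports plain int (*)(unsigned int cpu) prototypes, and any callback that is compiled out in a given configuration is #defined to NULL, which the hotplug core treats as "nothing to do in that direction". A sketch of the convention for a hypothetical foo subsystem:

int foo_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int foo_dead_cpu(unsigned int cpu);
#else
#define foo_dead_cpu	NULL
#endif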
kernel/cpu.c (66 changes)
@@ -517,6 +517,13 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
 	if (!cpu_online(cpu))
 		return 0;
 
+	/*
+	 * If we are up and running, use the hotplug thread. For early calls
+	 * we invoke the thread function directly.
+	 */
+	if (!st->thread)
+		return cpuhp_invoke_callback(cpu, state, cb);
+
 	st->cb_state = state;
 	st->cb = cb;
 	/*
@@ -1173,6 +1180,31 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.teardown		= NULL,
 		.cant_stop		= true,
 	},
+	[CPUHP_PERF_PREPARE] = {
+		.name			= "perf prepare",
+		.startup		= perf_event_init_cpu,
+		.teardown		= perf_event_exit_cpu,
+	},
+	[CPUHP_WORKQUEUE_PREP] = {
+		.name			= "workqueue prepare",
+		.startup		= workqueue_prepare_cpu,
+		.teardown		= NULL,
+	},
+	[CPUHP_HRTIMERS_PREPARE] = {
+		.name			= "hrtimers prepare",
+		.startup		= hrtimers_prepare_cpu,
+		.teardown		= hrtimers_dead_cpu,
+	},
+	[CPUHP_SMPCFD_PREPARE] = {
+		.name			= "SMPCFD prepare",
+		.startup		= smpcfd_prepare_cpu,
+		.teardown		= smpcfd_dead_cpu,
+	},
+	[CPUHP_RCUTREE_PREP] = {
+		.name			= "RCU-tree prepare",
+		.startup		= rcutree_prepare_cpu,
+		.teardown		= rcutree_dead_cpu,
+	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
 	 * are converted to states.
@@ -1184,6 +1216,16 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.skip_onerr		= true,
 		.cant_stop		= true,
 	},
+	/*
+	 * On the tear-down path, timers_dead_cpu() must be invoked
+	 * before blk_mq_queue_reinit_notify() from notify_dead(),
+	 * otherwise a RCU stall occurs.
+	 */
+	[CPUHP_TIMERS_DEAD] = {
+		.name			= "timers dead",
+		.startup		= NULL,
+		.teardown		= timers_dead_cpu,
+	},
 	/* Kicks the plugged cpu into life */
 	[CPUHP_BRINGUP_CPU] = {
 		.name			= "cpu:bringup",
@@ -1191,6 +1233,10 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.teardown		= NULL,
 		.cant_stop		= true,
 	},
+	[CPUHP_AP_SMPCFD_DYING] = {
+		.startup		= NULL,
+		.teardown		= smpcfd_dying_cpu,
+	},
 	/*
 	 * Handled on controll processor until the plugged processor manages
 	 * this itself.
@@ -1227,6 +1273,10 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup		= sched_cpu_starting,
 		.teardown		= sched_cpu_dying,
 	},
+	[CPUHP_AP_RCUTREE_DYING] = {
+		.startup		= NULL,
+		.teardown		= rcutree_dying_cpu,
+	},
 	/*
 	 * Low level startup/teardown notifiers. Run with interrupts
 	 * disabled. Will be removed once the notifiers are converted to
@@ -1250,6 +1300,22 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup		= smpboot_unpark_threads,
 		.teardown		= NULL,
 	},
+	[CPUHP_AP_PERF_ONLINE] = {
+		.name			= "perf online",
+		.startup		= perf_event_init_cpu,
+		.teardown		= perf_event_exit_cpu,
+	},
+	[CPUHP_AP_WORKQUEUE_ONLINE] = {
+		.name			= "workqueue online",
+		.startup		= workqueue_online_cpu,
+		.teardown		= workqueue_offline_cpu,
+	},
+	[CPUHP_AP_RCUTREE_ONLINE] = {
+		.name			= "RCU-tree online",
+		.startup		= rcutree_online_cpu,
+		.teardown		= rcutree_offline_cpu,
+	},
+
 	/*
 	 * Online/down_prepare notifiers. Will be removed once the notifiers
 	 * are converted to states.
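A note on reading the cpuhp_step tables above: entries in cpuhp_bp_states[] run on the controlling CPU, entries in cpuhp_ap_states[] on the hotplugged CPU itself; for each entry, .startup is invoked as a CPU is brought up past that state and .teardown as it is taken down past it, in reverse enum order. A NULL callback means there is nothing to do in that direction, as in this entry repeated from above:

	[CPUHP_AP_SMPCFD_DYING] = {
		.startup		= NULL,
		.teardown		= smpcfd_dying_cpu,
	},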
@@ -10357,7 +10357,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-static void perf_event_init_cpu(int cpu)
+int perf_event_init_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -10370,6 +10370,7 @@ static void perf_event_init_cpu(int cpu)
 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
 	}
 	mutex_unlock(&swhash->hlist_mutex);
+	return 0;
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
@@ -10401,14 +10402,17 @@ static void perf_event_exit_cpu_context(int cpu)
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
 }
+#else
+
+static void perf_event_exit_cpu_context(int cpu) { }
+
+#endif
 
-static void perf_event_exit_cpu(int cpu)
+int perf_event_exit_cpu(unsigned int cpu)
 {
 	perf_event_exit_cpu_context(cpu);
+	return 0;
 }
-#else
-static inline void perf_event_exit_cpu(int cpu) { }
-#endif
 
 static int
 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
@@ -10430,46 +10434,6 @@ static struct notifier_block perf_reboot_notifier = {
 	.priority = INT_MIN,
 };
 
-static int
-perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-	case CPU_UP_PREPARE:
-		/*
-		 * This must be done before the CPU comes alive, because the
-		 * moment we can run tasks we can encounter (software) events.
-		 *
-		 * Specifically, someone can have inherited events on kthreadd
-		 * or a pre-existing worker thread that gets re-bound.
-		 */
-		perf_event_init_cpu(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		/*
-		 * This must be done before the CPU dies because after that an
-		 * active event might want to IPI the CPU and that'll not work
-		 * so great for dead CPUs.
-		 *
-		 * XXX smp_call_function_single() return -ENXIO without a warn
-		 * so we could possibly deal with this.
-		 *
-		 * This is safe against new events arriving because
-		 * sys_perf_event_open() serializes against hotplug using
-		 * get_online_cpus().
-		 */
-		perf_event_exit_cpu(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -10482,7 +10446,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
-	perf_cpu_notifier(perf_cpu_notify);
+	perf_event_init_cpu(smp_processor_id());
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();
kernel/profile.c (179 changes)
@@ -328,68 +328,57 @@ out:
 	put_cpu();
 }
 
-static int profile_cpu_callback(struct notifier_block *info,
-					unsigned long action, void *__cpu)
+static int profile_dead_cpu(unsigned int cpu)
 {
-	int node, cpu = (unsigned long)__cpu;
 	struct page *page;
+	int i;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		node = cpu_to_mem(cpu);
-		per_cpu(cpu_profile_flip, cpu) = 0;
-		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = __alloc_pages_node(node,
-					GFP_KERNEL | __GFP_ZERO,
-					0);
-			if (!page)
-				return notifier_from_errno(-ENOMEM);
-			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
-		}
-		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = __alloc_pages_node(node,
-					GFP_KERNEL | __GFP_ZERO,
-					0);
-			if (!page)
-				goto out_free;
-			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
-		}
-		break;
-	out_free:
-		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
-		__free_page(page);
-		return notifier_from_errno(-ENOMEM);
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		if (prof_cpu_mask != NULL)
-			cpumask_set_cpu(cpu, prof_cpu_mask);
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		if (prof_cpu_mask != NULL)
-			cpumask_clear_cpu(cpu, prof_cpu_mask);
-		if (per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
-			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
-			__free_page(page);
-		}
-		if (per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
+	if (prof_cpu_mask != NULL)
+		cpumask_clear_cpu(cpu, prof_cpu_mask);
+
+	for (i = 0; i < 2; i++) {
+		if (per_cpu(cpu_profile_hits, cpu)[i]) {
+			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
+			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
 			__free_page(page);
 		}
-		break;
 	}
-	return NOTIFY_OK;
+	return 0;
+}
+
+static int profile_prepare_cpu(unsigned int cpu)
+{
+	int i, node = cpu_to_mem(cpu);
+	struct page *page;
+
+	per_cpu(cpu_profile_flip, cpu) = 0;
+
+	for (i = 0; i < 2; i++) {
+		if (per_cpu(cpu_profile_hits, cpu)[i])
+			continue;
+
+		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+		if (!page) {
+			profile_dead_cpu(cpu);
+			return -ENOMEM;
+		}
+		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
+
+	}
+	return 0;
+}
+
+static int profile_online_cpu(unsigned int cpu)
+{
+	if (prof_cpu_mask != NULL)
+		cpumask_set_cpu(cpu, prof_cpu_mask);
+
+	return 0;
 }
+
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()		do { } while (0)
 #define profile_discard_flip_buffers()	do { } while (0)
-#define profile_cpu_callback		NULL
 
 static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
@@ -531,83 +520,43 @@ static const struct file_operations proc_profile_operations = {
 	.llseek		= default_llseek,
 };
 
-#ifdef CONFIG_SMP
-static void profile_nop(void *unused)
-{
-}
-
-static int create_hash_tables(void)
+int __ref create_proc_profile(void)
 {
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		int node = cpu_to_mem(cpu);
-		struct page *page;
-
-		page = __alloc_pages_node(node,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				0);
-		if (!page)
-			goto out_cleanup;
-		per_cpu(cpu_profile_hits, cpu)[1]
-				= (struct profile_hit *)page_address(page);
-		page = __alloc_pages_node(node,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				0);
-		if (!page)
-			goto out_cleanup;
-		per_cpu(cpu_profile_hits, cpu)[0]
-				= (struct profile_hit *)page_address(page);
-	}
-	return 0;
-out_cleanup:
-	prof_on = 0;
-	smp_mb();
-	on_each_cpu(profile_nop, NULL, 1);
-	for_each_online_cpu(cpu) {
-		struct page *page;
-
-		if (per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
-			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
-			__free_page(page);
-		}
-		if (per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
-			__free_page(page);
-		}
-	}
-	return -1;
-}
-#else
-#define create_hash_tables()			({ 0; })
-#endif
-
-int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
-{
 	struct proc_dir_entry *entry;
+#ifdef CONFIG_SMP
+	enum cpuhp_state online_state;
+#endif
+
 	int err = 0;
 
 	if (!prof_on)
 		return 0;
+#ifdef CONFIG_SMP
+	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
+				profile_prepare_cpu, profile_dead_cpu);
+	if (err)
+		return err;
 
-	cpu_notifier_register_begin();
-	if (create_hash_tables()) {
-		err = -ENOMEM;
-		goto out;
-	}
-
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
+				profile_online_cpu, NULL);
+	if (err < 0)
+		goto err_state_prep;
+	online_state = err;
+	err = 0;
+#endif
 	entry = proc_create("profile", S_IWUSR | S_IRUGO,
 			    NULL, &proc_profile_operations);
 	if (!entry)
-		goto out;
+		goto err_state_onl;
 	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
-	__hotcpu_notifier(profile_cpu_callback, 0);
 
-out:
-	cpu_notifier_register_done();
+	return err;
+err_state_onl:
+#ifdef CONFIG_SMP
+	cpuhp_remove_state(online_state);
+err_state_prep:
+	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
+#endif
 	return err;
 }
 subsys_initcall(create_proc_profile);
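create_proc_profile() above shows the dynamic-range idiom: passing CPUHP_AP_ONLINE_DYN asks cpuhp_setup_state() to pick a free slot and return its number, which the caller must keep in order to remove the state later. A condensed sketch with hypothetical foo_* names:

static enum cpuhp_state foo_online_state;

static int foo_online_cpu(unsigned int cpu)  { return 0; }
static int foo_offline_cpu(unsigned int cpu) { return 0; }

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_FOO_ONLINE",
				foo_online_cpu, foo_offline_cpu);
	if (ret < 0)
		return ret;
	foo_online_state = ret;	/* the slot that was allocated */
	return 0;
}

static void foo_cleanup(void)
{
	cpuhp_remove_state(foo_online_state);
}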
@@ -1073,11 +1073,11 @@ EXPORT_SYMBOL_GPL(rcu_is_watching);
  * offline to continue to use RCU for one jiffy after marking itself
  * offline in the cpu_online_mask. This leniency is necessary given the
  * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the CPU_DYING
- * notifiers.
+ * the fact that a CPU enters the scheduler after completing the teardown
+ * of the CPU.
  *
- * This is also why RCU internally marks CPUs online during the
- * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
+ * This is also why RCU internally marks CPUs online during in the
+ * preparation phase and offline after the CPU has been taken down.
  *
  * Disable checking if in an NMI handler because we cannot safely report
  * errors from NMI handlers anyway.
@@ -3806,12 +3806,58 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
-static void rcu_prepare_cpu(int cpu)
+int rcutree_prepare_cpu(unsigned int cpu)
 {
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
 		rcu_init_percpu_data(cpu, rsp);
+
+	rcu_prepare_kthreads(cpu);
+	rcu_spawn_all_nocb_kthreads(cpu);
+
+	return 0;
+}
+
+static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+
+	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+}
+
+int rcutree_online_cpu(unsigned int cpu)
+{
+	sync_sched_exp_online_cleanup(cpu);
+	rcutree_affinity_setting(cpu, -1);
+	return 0;
+}
+
+int rcutree_offline_cpu(unsigned int cpu)
+{
+	rcutree_affinity_setting(cpu, cpu);
+	return 0;
+}
+
+
+int rcutree_dying_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_cpu(rsp);
+	return 0;
+}
+
+int rcutree_dead_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp) {
+		rcu_cleanup_dead_cpu(cpu, rsp);
+		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+	}
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -3851,52 +3897,6 @@ void rcu_report_dead(unsigned int cpu)
 }
 #endif
 
-/*
- * Handle CPU online/offline notification events.
- */
-int rcu_cpu_notify(struct notifier_block *self,
-		   unsigned long action, void *hcpu)
-{
-	long cpu = (long)hcpu;
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;
-	struct rcu_state *rsp;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		rcu_prepare_cpu(cpu);
-		rcu_prepare_kthreads(cpu);
-		rcu_spawn_all_nocb_kthreads(cpu);
-		break;
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		sync_sched_exp_online_cleanup(cpu);
-		rcu_boost_kthread_setaffinity(rnp, -1);
-		break;
-	case CPU_DOWN_PREPARE:
-		rcu_boost_kthread_setaffinity(rnp, cpu);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		for_each_rcu_flavor(rsp)
-			rcu_cleanup_dying_cpu(rsp);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		for_each_rcu_flavor(rsp) {
-			rcu_cleanup_dead_cpu(cpu, rsp);
-			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
-		}
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static int rcu_pm_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu)
 {
@@ -4208,10 +4208,9 @@ void __init rcu_init(void)
 	 * this is called early in boot, before either interrupts
 	 * or the scheduler are operational.
 	 */
-	cpu_notifier(rcu_cpu_notify, 0);
 	pm_notifier(rcu_pm_notify, 0);
 	for_each_online_cpu(cpu)
-		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+		rcutree_prepare_cpu(cpu);
 }
 
 #include "tree_exp.h"
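The RCU conversion above is the most spread-out one in the series: a single notifier becomes five hooks at different points of the hotplug sequence. Roughly, per the cpuhp_step tables in kernel/cpu.c earlier in this diff:

/*
 *  CPUHP_RCUTREE_PREP       .startup  = rcutree_prepare_cpu
 *                           .teardown = rcutree_dead_cpu
 *  CPUHP_AP_RCUTREE_DYING   .teardown = rcutree_dying_cpu
 *  CPUHP_AP_RCUTREE_ONLINE  .startup  = rcutree_online_cpu
 *                           .teardown = rcutree_offline_cpu
 */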
kernel/smp.c (45 changes)
@@ -33,38 +33,33 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static void flush_smp_call_function_queue(bool warn_cpu_offline);
 
-static int
-hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int smpcfd_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
 	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				     cpu_to_node(cpu)))
-		return notifier_from_errno(-ENOMEM);
+		return -ENOMEM;
 	cfd->csd = alloc_percpu(struct call_single_data);
 	if (!cfd->csd) {
 		free_cpumask_var(cfd->cpumask);
-		return notifier_from_errno(-ENOMEM);
+		return -ENOMEM;
 	}
-	break;
 
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		/* Fall-through to the CPU_DEAD[_FROZEN] case. */
-
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	return 0;
+}
+
+int smpcfd_dead_cpu(unsigned int cpu)
+{
+	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
 	free_cpumask_var(cfd->cpumask);
 	free_percpu(cfd->csd);
-	break;
+	return 0;
+}
 
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
+int smpcfd_dying_cpu(unsigned int cpu)
+{
 	/*
 	 * The IPIs for the smp-call-function callbacks queued by other
 	 * CPUs might arrive late, either due to hardware latencies or
@@ -75,27 +70,17 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	 * still pending.
 	 */
 	flush_smp_call_function_queue(false);
-		break;
-#endif
-	};
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block hotplug_cfd_notifier = {
-	.notifier_call		= hotplug_cfd,
-};
-
 void __init call_function_init(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
 	for_each_possible_cpu(i)
 		init_llist_head(&per_cpu(call_single_queue, i));
 
-	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
-	register_cpu_notifier(&hotplug_cfd_notifier);
+	smpcfd_prepare_cpu(smp_processor_id());
 }
 
 /*
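call_function_init() above also shows the boot-CPU idiom that recurs in hrtimers_init() and perf_event_init() elsewhere in this diff: init code that runs before the state machine is usable simply calls its own prepare callback for the boot CPU by hand. A sketch, with hypothetical foo names:

void __init foo_init(void)
{
	/* The boot CPU is already online; run the prepare step directly. */
	foo_prepare_cpu(smp_processor_id());
}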
@@ -1590,7 +1590,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
 /*
  * Functions related to boot-time initialization:
  */
-static void init_hrtimers_cpu(int cpu)
+int hrtimers_prepare_cpu(unsigned int cpu)
 {
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
@@ -1602,6 +1602,7 @@ static void init_hrtimers_cpu(int cpu)
 
 	cpu_base->cpu = cpu;
 	hrtimer_init_hres(cpu_base);
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1636,7 +1637,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 	}
 }
 
-static void migrate_hrtimers(int scpu)
+int hrtimers_dead_cpu(unsigned int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
 	int i;
@@ -1665,45 +1666,14 @@ static void migrate_hrtimers(int scpu)
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
 	local_irq_enable();
+	return 0;
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int hrtimer_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
-{
-	int scpu = (long)hcpu;
-
-	switch (action) {
-
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		init_hrtimers_cpu(scpu);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		migrate_hrtimers(scpu);
-		break;
-#endif
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block hrtimers_nb = {
-	.notifier_call = hrtimer_cpu_notify,
-};
-
 void __init hrtimers_init(void)
 {
-	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
-			   (void *)(long)smp_processor_id());
-	register_cpu_notifier(&hrtimers_nb);
+	hrtimers_prepare_cpu(smp_processor_id());
 }
 
 /**
@@ -1804,7 +1804,7 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h
 	}
 }
 
-static void migrate_timers(int cpu)
+int timers_dead_cpu(unsigned int cpu)
 {
 	struct timer_base *old_base;
 	struct timer_base *new_base;
@@ -1831,29 +1831,9 @@ static void migrate_timers(int cpu)
 		spin_unlock_irq(&new_base->lock);
 		put_cpu_ptr(&timer_bases);
 	}
+	return 0;
 }
 
-static int timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
-{
-	switch (action) {
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		migrate_timers((long)hcpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static inline void timer_register_cpu_notifier(void)
-{
-	cpu_notifier(timer_cpu_notify, 0);
-}
-#else
-static inline void timer_register_cpu_notifier(void) { }
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static void __init init_timer_cpu(int cpu)
@@ -1881,7 +1861,6 @@ void __init init_timers(void)
 {
 	init_timer_cpus();
 	init_timer_stats();
-	timer_register_cpu_notifier();
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
 
@@ -4607,31 +4607,25 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
-	struct workqueue_struct *wq;
-	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
 	for_each_cpu_worker_pool(pool, cpu) {
 		if (pool->nr_workers)
 			continue;
 		if (!create_worker(pool))
-			return NOTIFY_BAD;
+			return -ENOMEM;
 	}
-		break;
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+	struct workqueue_struct *wq;
+	int pi;
 
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
 	mutex_lock(&wq_pool_mutex);
 
 	for_each_pool(pool, pi) {
@@ -4650,25 +4644,14 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 		wq_update_unbound_numa(wq, cpu, true);
 
 	mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
 	/* unbinding per-cpu workers should happen on the local CPU */
 	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
 	queue_work_on(cpu, system_highpri_wq, &unbind_work);
@@ -4682,9 +4665,7 @@ static int workqueue_cpu_down_callback(struct notifier_block *nfb,
 	/* wait for per-cpu unbinding to finish */
 	flush_work(&unbind_work);
 	destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -5486,9 +5467,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
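The workqueue conversion above also illustrates the error convention that replaces NOTIFY_BAD: a startup callback returns 0 or a negative errno, and a failure aborts that CPU's bring-up, after which the hotplug core unwinds the states it had already run. A sketch with a hypothetical allocation helper:

static int foo_prepare_cpu(unsigned int cpu)
{
	if (!foo_alloc_for(cpu))	/* hypothetical per-cpu allocation */
		return -ENOMEM;		/* aborts this CPU's bring-up */
	return 0;
}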
@@ -405,26 +405,17 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
 	return (u64)-1;
 }
 
-static int kvm_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *cpu)
+static int kvm_timer_starting_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
 	kvm_timer_init_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(host_vtimer_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block kvm_timer_cpu_nb = {
-	.notifier_call = kvm_timer_cpu_notify,
-};
+static int kvm_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(host_vtimer_irq);
+	return 0;
+}
 
 int kvm_timer_hyp_init(void)
 {
@@ -449,12 +440,6 @@ int kvm_timer_hyp_init(void)
 		goto out;
 	}
 
-	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
-	if (err) {
-		kvm_err("Cannot register timer CPU notifier\n");
-		goto out_free;
-	}
-
 	wqueue = create_singlethread_workqueue("kvm_arch_timer");
 	if (!wqueue) {
 		err = -ENOMEM;
@@ -462,8 +447,10 @@ int kvm_timer_hyp_init(void)
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
-	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
 
+	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
+			  "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
+			  kvm_timer_dying_cpu);
 	goto out;
 out_free:
 	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
@@ -2326,32 +2326,18 @@ int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
 	return -ENXIO;
 }
 
-static void vgic_init_maintenance_interrupt(void *info)
+static int vgic_starting_cpu(unsigned int cpu)
 {
 	enable_percpu_irq(vgic->maint_irq, 0);
+	return 0;
 }
 
-static int vgic_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *cpu)
+static int vgic_dying_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		vgic_init_maintenance_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
 	disable_percpu_irq(vgic->maint_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block vgic_cpu_nb = {
-	.notifier_call = vgic_cpu_notify,
-};
-
 static int kvm_vgic_probe(void)
 {
 	const struct gic_kvm_info *gic_kvm_info;
@@ -2392,19 +2378,10 @@ int kvm_vgic_hyp_init(void)
 		return ret;
 	}
 
-	ret = __register_cpu_notifier(&vgic_cpu_nb);
-	if (ret) {
-		kvm_err("Cannot register vgic CPU notifier\n");
-		goto out_free_irq;
-	}
-
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
+	cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_STARTING,
+			  "AP_KVM_ARM_VGIC_STARTING", vgic_starting_cpu,
+			  vgic_dying_cpu);
 	return 0;
 
-out_free_irq:
-	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
-	return ret;
 }
 
 int kvm_irq_map_gsi(struct kvm *kvm,
@@ -353,32 +353,19 @@ out:
 
 /* GENERIC PROBE */
 
-static void vgic_init_maintenance_interrupt(void *info)
+static int vgic_init_cpu_starting(unsigned int cpu)
 {
 	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
+	return 0;
 }
 
-static int vgic_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *cpu)
+static int vgic_init_cpu_dying(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		vgic_init_maintenance_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
 	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block vgic_cpu_nb = {
-	.notifier_call = vgic_cpu_notify,
-};
-
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 {
 	/*
@@ -434,14 +421,14 @@ int kvm_vgic_hyp_init(void)
 		return ret;
 	}
 
-	ret = __register_cpu_notifier(&vgic_cpu_nb);
+	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+				"AP_KVM_ARM_VGIC_INIT_STARTING",
+				vgic_init_cpu_starting, vgic_init_cpu_dying);
 	if (ret) {
 		kvm_err("Cannot register vgic CPU notifier\n");
 		goto out_free_irq;
 	}
 
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
 	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
 	return 0;
 
@@ -3155,12 +3155,13 @@ static void hardware_enable_nolock(void *junk)
 	}
 }
 
-static void hardware_enable(void)
+static int kvm_starting_cpu(unsigned int cpu)
 {
 	raw_spin_lock(&kvm_count_lock);
 	if (kvm_usage_count)
 		hardware_enable_nolock(NULL);
 	raw_spin_unlock(&kvm_count_lock);
+	return 0;
 }
 
 static void hardware_disable_nolock(void *junk)
@@ -3173,12 +3174,13 @@ static void hardware_disable_nolock(void *junk)
 	kvm_arch_hardware_disable();
 }
 
-static void hardware_disable(void)
+static int kvm_dying_cpu(unsigned int cpu)
 {
 	raw_spin_lock(&kvm_count_lock);
 	if (kvm_usage_count)
 		hardware_disable_nolock(NULL);
 	raw_spin_unlock(&kvm_count_lock);
+	return 0;
 }
 
 static void hardware_disable_all_nolock(void)
@@ -3219,21 +3221,6 @@ static int hardware_enable_all(void)
 	return r;
 }
 
-static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
-			   void *v)
-{
-	val &= ~CPU_TASKS_FROZEN;
-	switch (val) {
-	case CPU_DYING:
-		hardware_disable();
-		break;
-	case CPU_STARTING:
-		hardware_enable();
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
 {
@@ -3500,10 +3487,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	return r;
 }
 
-static struct notifier_block kvm_cpu_notifier = {
-	.notifier_call = kvm_cpu_hotplug,
-};
-
 static int kvm_debugfs_open(struct inode *inode, struct file *file,
 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
 			   const char *fmt)
@@ -3754,7 +3737,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		goto out_free_1;
 	}
 
-	r = register_cpu_notifier(&kvm_cpu_notifier);
+	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING",
+				      kvm_starting_cpu, kvm_dying_cpu);
 	if (r)
 		goto out_free_2;
 	register_reboot_notifier(&kvm_reboot_notifier);
@@ -3808,7 +3792,7 @@ out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
 out_free_3:
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 out_free_2:
 out_free_1:
 	kvm_arch_hardware_unsetup();
@@ -3831,7 +3815,7 @@ void kvm_exit(void)
 	kvm_async_pf_deinit();
 	unregister_syscore_ops(&kvm_syscore_ops);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
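kvm_init()/kvm_exit() above close the loop on the registration side: every cpuhp_setup_state_nocalls() is paired with cpuhp_remove_state_nocalls() on both the error path and the exit path. A sketch of the pairing, with hypothetical CPUHP_AP_FOO_STARTING and foo_* names:

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_FOO_STARTING,
					"AP_FOO_STARTING",
					foo_starting_cpu, foo_dying_cpu);
	if (ret)
		return ret;

	ret = foo_setup_rest();		/* hypothetical follow-on setup */
	if (ret)
		cpuhp_remove_state_nocalls(CPUHP_AP_FOO_STARTING);
	return ret;
}

static void __exit foo_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_FOO_STARTING);
}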