linux/kernel/cpu.c

/* CPU control.
* (C) 2001, 2002, 2003, 2004 Rusty Russell
*
* This code is licenced under the GPL.
*/
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <asm/semaphore.h>
/* This protects CPUs going up and down... */
DECLARE_MUTEX(cpucontrol);
EXPORT_SYMBOL_GPL(cpucontrol);
static struct notifier_block *cpu_chain;

/*
 * Used by callers to check whether they need to acquire cpucontrol
 * in order to protect a CPU from being removed. It is sometimes
 * necessary to call these functions both for normal operations and
 * in response to a CPU being added or removed. If the call is made
 * from the same thread context as a CPU hotplug thread, we don't need
 * to take the lock, since it is already held.
 * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
 * (an illustrative sketch also follows the function below).
 */
int current_in_cpu_hotplug(void)
{
        return (current->flags & PF_HOTPLUG_CPU);
}
EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
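
/*
 * Illustrative sketch (not verbatim from any caller): a user such as the
 * cpufreq core, which can run either from normal process context or from
 * the hotplug notifier path, might guard its hotplug locking roughly like
 * this:
 *
 *      if (!current_in_cpu_hotplug())
 *              lock_cpu_hotplug();
 *      ... operate on per-CPU state ...
 *      if (!current_in_cpu_hotplug())
 *              unlock_cpu_hotplug();
 */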
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        if ((ret = down_interruptible(&cpucontrol)) != 0)
                return ret;
        ret = notifier_chain_register(&cpu_chain, nb);
        up(&cpucontrol);
        return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
        down(&cpucontrol);
        notifier_chain_unregister(&cpu_chain, nb);
        up(&cpucontrol);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
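
/*
 * Illustrative sketch (not part of this file): a subsystem that needs to
 * react to CPU hotplug events registers a notifier whose callback switches
 * on the action code. foo_online()/foo_offline() below are hypothetical
 * helpers standing in for subsystem-specific work:
 *
 *      static int foo_cpu_callback(struct notifier_block *nb,
 *                                  unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action) {
 *              case CPU_ONLINE:
 *                      foo_online(cpu);
 *                      break;
 *              case CPU_DEAD:
 *                      foo_offline(cpu);
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_cpu_notifier = {
 *              .notifier_call = foo_cpu_callback,
 *      };
 *
 *      register_cpu_notifier(&foo_cpu_notifier);
 */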
#ifdef CONFIG_HOTPLUG_CPU
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                               "(state = %ld, flags = %lx)\n",
                               p->comm, p->pid, cpu, p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}
/* Take this CPU down. */
static int take_cpu_down(void *unused)
{
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}

int cpu_down(unsigned int cpu)
{
        int err;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;

        if ((err = lock_cpu_hotplug_interruptible()) != 0)
                return err;

        if (num_online_cpus() == 1) {
                err = -EBUSY;
                goto out;
        }

        if (!cpu_online(cpu)) {
                err = -EINVAL;
                goto out;
        }

        /*
         * Leave a trace in current->flags indicating that we are already
         * in the process of performing CPU hotplug. Callers can check
         * whether cpucontrol is already held by the current thread and,
         * if so, avoid a deadlock by not acquiring the lock again.
         */
        current->flags |= PF_HOTPLUG_CPU;
        err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                  (void *)(long)cpu);
        if (err == NOTIFY_BAD) {
                printk("%s: attempt to take down CPU %u failed\n",
                       __FUNCTION__, cpu);
                err = -EINVAL;
                goto out;
        }

        /* Ensure that we are not runnable on dying cpu */
        old_allowed = current->cpus_allowed;
        tmp = CPU_MASK_ALL;
        cpu_clear(cpu, tmp);
        set_cpus_allowed(current, tmp);
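
        /*
         * Run take_cpu_down() on the dying CPU via the stop_machine
         * infrastructure: all other CPUs are held in a tight loop while the
         * callback disables the target CPU. The returned task is the
         * stop_machine kthread, cleaned up later with kthread_stop().
         */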
        p = __stop_machine_run(take_cpu_down, NULL, cpu);
        if (IS_ERR(p)) {
                /* CPU didn't die: tell everyone. Can't complain. */
                if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
                                        (void *)(long)cpu) == NOTIFY_BAD)
                        BUG();

                err = PTR_ERR(p);
                goto out_allowed;
        }

        if (cpu_online(cpu))
                goto out_thread;

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* Move it here so it can run. */
        kthread_bind(p, get_cpu());
        put_cpu();

        /* CPU is completely dead: tell everyone. Too late to complain. */
        if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
            == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_thread:
        err = kthread_stop(p);
out_allowed:
        set_cpus_allowed(current, old_allowed);
out:
        current->flags &= ~PF_HOTPLUG_CPU;
        unlock_cpu_hotplug();
        return err;
}
#endif /*CONFIG_HOTPLUG_CPU*/
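
/*
 * Note (illustrative): at runtime, cpu_down() and cpu_up() are normally
 * driven from the per-CPU sysfs "online" attribute (see drivers/base/cpu.c),
 * e.g.:
 *
 *      echo 0 > /sys/devices/system/cpu/cpu1/online
 *      echo 1 > /sys/devices/system/cpu/cpu1/online
 */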

int __devinit cpu_up(unsigned int cpu)
{
        int ret;
        void *hcpu = (void *)(long)cpu;

        if ((ret = down_interruptible(&cpucontrol)) != 0)
                return ret;

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * Leave a trace in current->flags indicating that we are already
         * in the process of performing CPU hotplug.
         */
        current->flags |= PF_HOTPLUG_CPU;
        ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
        if (ret == NOTIFY_BAD) {
                printk("%s: attempt to bring up CPU %u failed\n",
                       __FUNCTION__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        if (!cpu_online(cpu))
                BUG();

        /* Now call notifier in preparation. */
        notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
        if (ret != 0)
                notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
        current->flags &= ~PF_HOTPLUG_CPU;
        up(&cpucontrol);
        return ret;
}