smpboot/threads, watchdog/core: Avoid runtime allocation
smpboot_update_cpumask_percpu_thread() allocates a temporary cpumask at
runtime. This is suboptimal because the call site needs more code size for
proper error handling than a statically allocated temporary mask requires
data size.

Add a static temporary cpumask. The function is globally serialized, so no
further protection is required.

Remove the half-baked error handling in the watchdog code and get rid of
the export, as there are no in-tree modular users of that function.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Link: http://lkml.kernel.org/r/20170912194147.297288838@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 0d85923c7a
parent 05ba3de74a
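The pattern at the heart of this change, trading a per-call allocation and
its error path for one static scratch mask under a global lock, can be
sketched outside the kernel roughly as follows. This is illustrative only:
the names, the fixed-size struct mask, and the pthread mutex are stand-ins,
not the kernel's cpumask API or smpboot_threads_lock.

/*
 * Illustrative sketch, not kernel code: a scratch bitmask that used to be
 * heap-allocated per call (forcing every caller to handle -ENOMEM) becomes
 * a single static object, safe because all callers take the same lock.
 */
#include <pthread.h>
#include <stddef.h>
#include <string.h>

#define NWORDS 4
struct mask { unsigned long bits[NWORDS]; };

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mask tmp;	/* static scratch: data size instead of code size */

void update_mask(struct mask *old, const struct mask *new)
{
	size_t i;

	pthread_mutex_lock(&update_lock);	/* global serialization */

	/* tmp = old & ~new: members that dropped out of the mask */
	for (i = 0; i < NWORDS; i++)
		tmp.bits[i] = old->bits[i] & ~new->bits[i];
	/* ... act on tmp here, e.g. park the dropped members ... */

	memcpy(old, new, sizeof(*old));		/* commit the new mask */

	pthread_mutex_unlock(&update_lock);
}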
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					 const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					  const struct cpumask *);
 
 #endif
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -344,39 +344,31 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
  * by the client, but only by calling this function.
  * This function can only be called on a registered smp_hotplug_thread.
  */
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					 const struct cpumask *new)
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					  const struct cpumask *new)
 {
 	struct cpumask *old = plug_thread->cpumask;
-	cpumask_var_t tmp;
+	static struct cpumask tmp;
 	unsigned int cpu;
 
-	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
-		return -ENOMEM;
-
 	get_online_cpus();
 	mutex_lock(&smpboot_threads_lock);
 
 	/* Park threads that were exclusively enabled on the old mask. */
-	cpumask_andnot(tmp, old, new);
-	for_each_cpu_and(cpu, tmp, cpu_online_mask)
+	cpumask_andnot(&tmp, old, new);
+	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
 		smpboot_park_thread(plug_thread, cpu);
 
 	/* Unpark threads that are exclusively enabled on the new mask. */
-	cpumask_andnot(tmp, new, old);
-	for_each_cpu_and(cpu, tmp, cpu_online_mask)
+	cpumask_andnot(&tmp, new, old);
+	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
 		smpboot_unpark_thread(plug_thread, cpu);
 
 	cpumask_copy(old, new);
 
 	mutex_unlock(&smpboot_threads_lock);
 	put_online_cpus();
-
-	free_cpumask_var(tmp);
-
-	return 0;
 }
-EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
 
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
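A hedged sketch of what a built-in (non-modular) caller looks like after
this change. The client names are hypothetical, and the smp_hotplug_thread
is assumed to have been registered via smpboot_register_percpu_thread()
beforehand; the point is that the call site no longer needs an error path:

#include <linux/smpboot.h>

/* Hypothetical built-in client; assumed already registered. */
static struct smp_hotplug_thread my_threads;

static void my_client_set_cpus(const struct cpumask *new)
{
	/*
	 * The function returns void now: the static scratch mask cannot
	 * fail to allocate, so there is nothing for the caller to check.
	 */
	smpboot_update_cpumask_percpu_thread(&my_threads, new);
}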
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -787,31 +787,20 @@ out:
 	return err;
 }
 
-static int watchdog_update_cpus(void)
+static void watchdog_update_cpus(void)
 {
-	if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR)) {
-		return smpboot_update_cpumask_percpu_thread(&watchdog_threads,
-							    &watchdog_cpumask);
+	if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR) && watchdog_running) {
+		smpboot_update_cpumask_percpu_thread(&watchdog_threads,
+						     &watchdog_cpumask);
 		__lockup_detector_cleanup();
 	}
-	return 0;
 }
 
 static void proc_watchdog_cpumask_update(void)
 {
 	/* Remove impossible cpus to keep sysctl output clean. */
 	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
-
-	if (watchdog_running) {
-		/*
-		 * Failure would be due to being unable to allocate a
-		 * temporary cpumask, so we are likely not in a position to
-		 * do much else to make things better.
-		 */
-		if (watchdog_update_cpus() != 0)
-			pr_err("cpumask update failed\n");
-	}
-
+	watchdog_update_cpus();
 	watchdog_nmi_reconfigure();
 }