sched: Add sched_smt_active()
Add the sched_smt_active() function needed for some x86 speculation
mitigations.  It was introduced upstream by commits 1b568f0aabf2
("sched/core: Optimize SCHED_SMT"), ba2591a5993e ("sched/smt: Update
sched_smt_present at runtime"), c5511d03ec09 ("sched/smt: Make
sched_smt_present track topology"), and 321a874a7ef8 ("sched/smt:
Expose sched_smt_present static key").  The upstream implementation
uses the static_key_{disable,enable}_cpuslocked() functions, which
aren't practical to backport here, so this version tracks SMT presence
with a plain atomic counter instead.

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/sched/smt.h | 18 ++++++++++++++++++ (new file)
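For context, a rough sketch (not part of this patch) of the kind of x86 mitigation-side caller this helper enables. The function name and policy below are illustrative assumptions, not code from the upstream series:

#include <linux/sched/smt.h>

/*
 * Hypothetical example: an SMT-dependent mitigation (an STIBP-style
 * control, say) only needs to do work while sibling threads can run
 * concurrently.  stibp_required() is an illustrative name, not a real
 * kernel function.
 */
static bool stibp_required(void)
{
	/* No SMT siblings online, so there is nothing to protect against. */
	if (!sched_smt_active())
		return false;

	return true;
}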
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern atomic_t sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+	return atomic_read(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+#endif
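For comparison, the upstream helper that this backport approximates is built on a static key rather than an atomic counter; roughly, per the commits named in the changelog:

/* Upstream (approximate) shape of the same helper: */
extern struct static_key_false sched_smt_present;

static __always_inline bool sched_smt_active(void)
{
	return static_branch_likely(&sched_smt_present);
}

The static key patches the branch at runtime, while the atomic_read() used here is an ordinary load; the backport trades that micro-optimization for not needing the *_cpuslocked() static-key API.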
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5610,6 +5610,10 @@ static void set_cpu_rq_start_time(void)
 	rq->age_stamp = sched_clock_cpu(cpu);
 }
 
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 static int sched_cpu_active(struct notifier_block *nfb,
 			    unsigned long action, void *hcpu)
 {
@@ -5626,11 +5630,23 @@ static int sched_cpu_active(struct notifier_block *nfb,
 		 * set_cpu_online(). But it might not yet have marked itself
 		 * as active, which is essential from here on.
 		 */
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going up, increment the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			atomic_inc(&sched_smt_present);
+#endif
 		set_cpu_active(cpu, true);
 		stop_machine_unpark(cpu);
 		return NOTIFY_OK;
 
 	case CPU_DOWN_FAILED:
+#ifdef CONFIG_SCHED_SMT
+		/* Same as for CPU_ONLINE */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			atomic_inc(&sched_smt_present);
+#endif
 		set_cpu_active(cpu, true);
 		return NOTIFY_OK;
 
@@ -5645,7 +5661,15 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		set_cpu_active((long)hcpu, false);
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going down, decrement the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask((long)hcpu)) == 2)
+			atomic_dec(&sched_smt_present);
+#endif
 		return NOTIFY_OK;
 
 	default:
 		return NOTIFY_DONE;
 	}
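The cpumask_weight(cpu_smt_mask(cpu)) == 2 test fires only when a core's second sibling comes online (or is about to go offline), so sched_smt_present effectively counts cores that currently have both threads online. A hedged walk-through on a hypothetical machine, not taken from the patch:

/*
 * Illustrative only (hypothetical box: CPUs 0/1 share a core,
 * CPUs 2/3 share a core):
 *
 *   event                  weight of cpu_smt_mask()  sched_smt_present  sched_smt_active()
 *   CPU 0 boots            1                         0                  false
 *   CPU 1 comes online     2 -> atomic_inc()         1                  true
 *   CPU 3 comes online     1                         1                  true
 *   CPU 1 goes offline     2 -> atomic_dec()         0                  false
 */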
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/deadline.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>