From 73a5fa7d51366a549a9f2e3ee875ae51aa0b5580 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Fri, 15 Apr 2022 21:19:57 +0200
Subject: [PATCH] x86/aperfmperf: Restructure arch_scale_freq_tick()

Preparation for sharing code with the CPU frequency portion of the
aperf/mperf code.

No functional change.

Signed-off-by: Thomas Gleixner
Reviewed-by: Rafael J. Wysocki
Acked-by: Peter Zijlstra (Intel)
Acked-by: Paul E. McKenney
Link: https://lore.kernel.org/r/20220415161206.706185092@linutronix.de
---
 arch/x86/kernel/cpu/aperfmperf.c | 36 +++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 6922c77d98d8..6220503af26a 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -477,22 +477,9 @@ static DECLARE_WORK(disable_freq_invariance_work,
 
 DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
 
-void arch_scale_freq_tick(void)
+static void scale_freq_tick(u64 acnt, u64 mcnt)
 {
-	struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
-	u64 aperf, mperf, acnt, mcnt, freq_scale;
-
-	if (!arch_scale_freq_invariant())
-		return;
-
-	rdmsrl(MSR_IA32_APERF, aperf);
-	rdmsrl(MSR_IA32_MPERF, mperf);
-
-	acnt = aperf - s->aperf;
-	mcnt = mperf - s->mperf;
-
-	s->aperf = aperf;
-	s->mperf = mperf;
+	u64 freq_scale;
 
 	if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
 		goto error;
@@ -514,4 +501,23 @@ error:
 	pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
 	schedule_work(&disable_freq_invariance_work);
 }
+
+void arch_scale_freq_tick(void)
+{
+	struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
+	u64 acnt, mcnt, aperf, mperf;
+
+	if (!arch_scale_freq_invariant())
+		return;
+
+	rdmsrl(MSR_IA32_APERF, aperf);
+	rdmsrl(MSR_IA32_MPERF, mperf);
+	acnt = aperf - s->aperf;
+	mcnt = mperf - s->mperf;
+
+	s->aperf = aperf;
+	s->mperf = mperf;
+
+	scale_freq_tick(acnt, mcnt);
+}
 #endif /* CONFIG_X86_64 && CONFIG_SMP */