Merge branch 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cache control updates from Borislav Petkov:

 - The generalization of the RDT code to accommodate the addition of
   AMD's very similar implementation of the cache monitoring feature.

   This entails a subsystem move into a separate and generic
   arch/x86/kernel/cpu/resctrl/ directory along with adding
   vendor-specific initialization and feature detection helpers.

   On top of that is the unification of user-visible strings, both in
   the resctrl filesystem error handling and Kconfig.

   Provided by Babu Moger and Sherry Hurwitz.

 - Code simplifications and error handling improvements by Reinette
   Chatre.

* 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Fix rdt_find_domain() return value and checks
  x86/resctrl: Remove unnecessary check for cbm_validate()
  x86/resctrl: Use rdt_last_cmd_puts() where possible
  MAINTAINERS: Update resctrl filename patterns
  Documentation: Rename and update intel_rdt_ui.txt to resctrl_ui.txt
  x86/resctrl: Introduce AMD QOS feature
  x86/resctrl: Fixup the user-visible strings
  x86/resctrl: Add AMD's X86_FEATURE_MBA to the scattered CPUID features
  x86/resctrl: Rename the config option INTEL_RDT to RESCTRL
  x86/resctrl: Add vendor check for the MBA software controller
  x86/resctrl: Bring cbm_validate() into the resource structure
  x86/resctrl: Initialize the vendor-specific resource functions
  x86/resctrl: Move all the macros to resctrl/internal.h
  x86/resctrl: Re-arrange the RDT init code
  x86/resctrl: Rename the RDT functions and definitions
  x86/resctrl: Rename and move rdt files to a separate directory
commit a52fb43a5f
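As a minimal user-space sketch of what the unified interface looks like after this series (an illustration, not part of the commit; it assumes a kernel built with CONFIG_RESCTRL, and /sys/fs/resctrl is the conventional mount point per the documentation):

    /* Mount the resctrl filesystem, now shared by Intel RDT and AMD QoS. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* "resctrl" is the filesystem type as well as the source name. */
            if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, NULL)) {
                    perror("mount resctrl");
                    return 1;
            }
            return 0;
    }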
Documentation/x86/resctrl_ui.txt (renamed from Documentation/x86/intel_rdt_ui.txt)
@@ -1,4 +1,7 @@
-User Interface for Resource Allocation in Intel Resource Director Technology
+User Interface for Resource Control feature
+
+Intel refers to this feature as Intel Resource Director Technology(Intel(R) RDT).
+AMD refers to this feature as AMD Platform Quality of Service(AMD QoS).
 
 Copyright (C) 2016 Intel Corporation
 
@@ -6,8 +9,8 @@ Fenghua Yu <fenghua.yu@intel.com>
 Tony Luck <tony.luck@intel.com>
 Vikas Shivappa <vikas.shivappa@intel.com>
 
-This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
-X86 /proc/cpuinfo flag bits:
+This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+flag bits:
 RDT (Resource Director Technology) Allocation - "rdt_a"
 CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
 CDP (Code and Data Prioritization ) - "cdp_l3", "cdp_l2"
MAINTAINERS
@@ -12717,9 +12717,9 @@ M:	Fenghua Yu <fenghua.yu@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	arch/x86/kernel/cpu/intel_rdt*
-F:	arch/x86/include/asm/intel_rdt_sched.h
-F:	Documentation/x86/intel_rdt*
+F:	arch/x86/kernel/cpu/resctrl/
+F:	arch/x86/include/asm/resctrl_sched.h
+F:	Documentation/x86/resctrl*
 
 READ-COPY UPDATE (RCU)
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
arch/x86/Kconfig
@@ -444,15 +444,23 @@ config RETPOLINE
 	  branches. Requires a compiler with -mindirect-branch=thunk-extern
 	  support for full protection. The kernel may run slower.
 
-config INTEL_RDT
-	bool "Intel Resource Director Technology support"
-	depends on X86 && CPU_SUP_INTEL
+config RESCTRL
+	bool "Resource Control support"
+	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
 	select KERNFS
 	help
-	  Select to enable resource allocation and monitoring which are
-	  sub-features of Intel Resource Director Technology(RDT). More
-	  information about RDT can be found in the Intel x86
-	  Architecture Software Developer Manual.
+	  Enable Resource Control support.
+
+	  Provide support for the allocation and monitoring of system resources
+	  usage by the CPU.
+
+	  Intel calls this Intel Resource Director Technology
+	  (Intel(R) RDT). More information about RDT can be found in the
+	  Intel x86 Architecture Software Developer Manual.
+
+	  AMD calls this AMD Platform Quality of Service (AMD QoS).
+	  More information about AMD QoS can be found in the AMD64 Technology
+	  Platform Quality of Service Extensions manual.
 
 	  Say N if unsure.
arch/x86/include/asm/resctrl_sched.h (renamed from arch/x86/include/asm/intel_rdt_sched.h)
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RESCTRL_SCHED_H
+#define _ASM_X86_RESCTRL_SCHED_H
 
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
 
 #include <linux/sched.h>
 #include <linux/jump_label.h>
@@ -10,7 +10,7 @@
 #define IA32_PQR_ASSOC	0x0c8f
 
 /**
- * struct intel_pqr_state - State cache for the PQR MSR
+ * struct resctrl_pqr_state - State cache for the PQR MSR
  * @cur_rmid:		The cached Resource Monitoring ID
  * @cur_closid:	The cached Class Of Service ID
  * @default_rmid:	The user assigned Resource Monitoring ID
@@ -24,21 +24,21 @@
  * The cache also helps to avoid pointless updates if the value does
  * not change.
  */
-struct intel_pqr_state {
+struct resctrl_pqr_state {
 	u32			cur_rmid;
 	u32			cur_closid;
 	u32			default_rmid;
 	u32			default_closid;
 };
 
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 
 /*
- * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  * simple as possible.
  * Must be called with preemption disabled.
  */
-static void __intel_rdt_sched_in(void)
+static void __resctrl_sched_in(void)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
 
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
 	}
 }
 
-static inline void intel_rdt_sched_in(void)
+static inline void resctrl_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
-		__intel_rdt_sched_in();
+		__resctrl_sched_in();
 }
 
 #else
 
-static inline void intel_rdt_sched_in(void) {}
+static inline void resctrl_sched_in(void) {}
 
-#endif /* CONFIG_INTEL_RDT */
+#endif /* CONFIG_RESCTRL */
 
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RESCTRL_SCHED_H */
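The hunks above only rename the hook; the body elided by the diff is unchanged by this series. A condensed sketch of what __resctrl_sched_in() does (not the verbatim kernel code): it prefers the incoming task's CLOSID/RMID over the per-CPU defaults and touches IA32_PQR_ASSOC only when the cached values change:

    /* Condensed sketch of the elided __resctrl_sched_in() body. */
    static void __resctrl_sched_in(void)
    {
            struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
            u32 closid = state->default_closid;
            u32 rmid = state->default_rmid;

            if (static_branch_likely(&rdt_alloc_enable_key) && current->closid)
                    closid = current->closid;
            if (static_branch_likely(&rdt_mon_enable_key) && current->rmid)
                    rmid = current->rmid;

            /* The cache avoids pointless MSR writes on the hot path. */
            if (closid != state->cur_closid || rmid != state->cur_rmid) {
                    state->cur_closid = closid;
                    state->cur_rmid = rmid;
                    wrmsr(IA32_PQR_ASSOC, rmid, closid);
            }
    }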
arch/x86/kernel/cpu/Makefile
@@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)		+= centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
-CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
-
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
 obj-$(CONFIG_MICROCODE)			+= microcode/
+obj-$(CONFIG_RESCTRL)			+= resctrl/
 
 obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o
arch/x86/kernel/cpu/resctrl/Makefile (new file, 4 lines)
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RESCTRL)	+= core.o rdtgroup.o monitor.o
+obj-$(CONFIG_RESCTRL)	+= ctrlmondata.o pseudo_lock.o
+CFLAGS_pseudo_lock.o = -I$(src)
arch/x86/kernel/cpu/resctrl/core.c (renamed from arch/x86/kernel/cpu/intel_rdt.c)
@@ -22,7 +22,7 @@
  * Software Developer Manual June 2016, volume 3, section 17.17.
  */
 
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	"resctrl: " fmt
 
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -30,22 +30,19 @@
 #include <linux/cpuhotplug.h>
 
 #include <asm/intel-family.h>
-#include <asm/intel_rdt_sched.h>
-#include "intel_rdt.h"
-
-#define MBA_IS_LINEAR	0x4
-#define MBA_MAX_MBPS	U32_MAX
+#include <asm/resctrl_sched.h>
+#include "internal.h"
 
 /* Mutex to protect rdtgroup access. */
 DEFINE_MUTEX(rdtgroup_mutex);
 
 /*
- * The cached intel_pqr_state is strictly per CPU and can never be
+ * The cached resctrl_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Functions which modify the state
  * are called with interrupts disabled and no preemption, which
  * is sufficient for the protection.
  */
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 /*
  * Used to store the max resource name width and max resource data width
@@ -60,9 +57,13 @@ int max_name_width, max_data_width;
 bool rdt_alloc_capable;
 
 static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+		struct rdt_resource *r);
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
+	      struct rdt_resource *r);
 
 #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
 
@@ -72,7 +73,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L3,
 		.name			= "L3",
 		.domains		= domain_init(RDT_RESOURCE_L3),
-		.msr_base		= IA32_L3_CBM_BASE,
+		.msr_base		= MSR_IA32_L3_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 3,
 		.cache = {
@@ -89,7 +90,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L3DATA,
 		.name			= "L3DATA",
 		.domains		= domain_init(RDT_RESOURCE_L3DATA),
-		.msr_base		= IA32_L3_CBM_BASE,
+		.msr_base		= MSR_IA32_L3_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 3,
 		.cache = {
@@ -106,7 +107,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L3CODE,
 		.name			= "L3CODE",
 		.domains		= domain_init(RDT_RESOURCE_L3CODE),
-		.msr_base		= IA32_L3_CBM_BASE,
+		.msr_base		= MSR_IA32_L3_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 3,
 		.cache = {
@@ -123,7 +124,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L2,
 		.name			= "L2",
 		.domains		= domain_init(RDT_RESOURCE_L2),
-		.msr_base		= IA32_L2_CBM_BASE,
+		.msr_base		= MSR_IA32_L2_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 2,
 		.cache = {
@@ -140,7 +141,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L2DATA,
 		.name			= "L2DATA",
 		.domains		= domain_init(RDT_RESOURCE_L2DATA),
-		.msr_base		= IA32_L2_CBM_BASE,
+		.msr_base		= MSR_IA32_L2_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 2,
 		.cache = {
@@ -157,7 +158,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L2CODE,
 		.name			= "L2CODE",
 		.domains		= domain_init(RDT_RESOURCE_L2CODE),
-		.msr_base		= IA32_L2_CBM_BASE,
+		.msr_base		= MSR_IA32_L2_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 2,
 		.cache = {
@@ -174,10 +175,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_MBA,
 		.name			= "MB",
 		.domains		= domain_init(RDT_RESOURCE_MBA),
-		.msr_base		= IA32_MBA_THRTL_BASE,
-		.msr_update		= mba_wrmsr,
 		.cache_level		= 3,
-		.parse_ctrlval		= parse_bw,
 		.format_str		= "%d=%*u",
 		.fflags			= RFTYPE_RES_MB,
 	},
@@ -211,9 +209,10 @@ static inline void cache_alloc_hsw_probe(void)
 	struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
 	u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
-	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
 		return;
-	rdmsr(IA32_L3_CBM_BASE, l, h);
+
+	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
 
 	/* If all the bits were set in MSR, return success */
 	if (l != max_cbm)
@@ -259,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 	return false;
 }
 
-static bool rdt_get_mem_config(struct rdt_resource *r)
+static bool __get_mem_config_intel(struct rdt_resource *r)
 {
 	union cpuid_0x10_3_eax eax;
 	union cpuid_0x10_x_edx edx;
@@ -285,6 +284,30 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
 	return true;
 }
 
+static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+{
+	union cpuid_0x10_3_eax eax;
+	union cpuid_0x10_x_edx edx;
+	u32 ebx, ecx;
+
+	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+	r->num_closid = edx.split.cos_max + 1;
+	r->default_ctrl = MAX_MBA_BW_AMD;
+
+	/* AMD does not use delay */
+	r->membw.delay_linear = false;
+
+	r->membw.min_bw = 0;
+	r->membw.bw_gran = 1;
+	/* Max value is 2048, Data width should be 4 in decimal */
+	r->data_width = 4;
+
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
+
+	return true;
+}
+
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
 	union cpuid_0x10_1_eax eax;
@@ -344,6 +367,15 @@ static int get_cache_id(int cpu, int level)
 	return -1;
 }
 
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+	unsigned int i;
+
+	for (i = m->low; i < m->high; i++)
+		wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+}
+
 /*
  * Map the memory b/w percentage value to delay values
  * that can be written to QOS_MSRs.
@@ -359,7 +391,8 @@ u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
 }
 
 static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+		struct rdt_resource *r)
 {
 	unsigned int i;
 
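For contrast with mba_wrmsr_amd() above, which writes the user's bandwidth value straight into the MSRs, the Intel path funnels the percentage through delay_bw_map(). A condensed sketch of that mapping, assuming the linear delay scale advertised by MBA_IS_LINEAR:

    /* Sketch of the Intel percentage-to-delay mapping used by
     * mba_wrmsr_intel(): a request for bw% becomes a delay of (100 - bw)
     * written to the throttle MSRs, so 100% means no throttling. */
    u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
    {
            if (r->membw.delay_linear)
                    return MAX_MBA_BW - bw;        /* MAX_MBA_BW is 100u */

            /* Non-linear delay tables are not supported. */
            return r->default_ctrl;
    }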
@@ -421,7 +454,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 	struct list_head *l;
 
 	if (id < 0)
-		return ERR_PTR(id);
+		return ERR_PTR(-ENODEV);
 
 	list_for_each(l, &r->domains) {
 		d = list_entry(l, struct rdt_domain, list);
@@ -639,7 +672,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 static void clear_closid_rmid(int cpu)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	state->default_closid = 0;
 	state->default_rmid = 0;
@@ -648,7 +681,7 @@ static void clear_closid_rmid(int cpu)
 	wrmsr(IA32_PQR_ASSOC, 0, 0);
 }
 
-static int intel_rdt_online_cpu(unsigned int cpu)
+static int resctrl_online_cpu(unsigned int cpu)
 {
 	struct rdt_resource *r;
 
@@ -674,7 +707,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
 	}
 }
 
-static int intel_rdt_offline_cpu(unsigned int cpu)
+static int resctrl_offline_cpu(unsigned int cpu)
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -794,6 +827,19 @@ static bool __init rdt_cpu_has(int flag)
 	return ret;
 }
 
+static __init bool get_mem_config(void)
+{
+	if (!rdt_cpu_has(X86_FEATURE_MBA))
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+
+	return false;
+}
+
 static __init bool get_rdt_alloc_resources(void)
 {
 	bool ret = false;
@@ -818,10 +864,9 @@ static __init bool get_rdt_alloc_resources(void)
 		ret = true;
 	}
 
-	if (rdt_cpu_has(X86_FEATURE_MBA)) {
-		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
-			ret = true;
-	}
+	if (get_mem_config())
+		ret = true;
 
 	return ret;
 }
@@ -840,7 +885,7 @@ static __init bool get_rdt_mon_resources(void)
 	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
 }
 
-static __init void rdt_quirks(void)
+static __init void __check_quirks_intel(void)
 {
 	switch (boot_cpu_data.x86_model) {
 	case INTEL_FAM6_HASWELL_X:
@@ -855,30 +900,91 @@ static __init void __check_quirks_intel(void)
 	}
 }
 
+static __init void check_quirks(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		__check_quirks_intel();
+}
+
 static __init bool get_rdt_resources(void)
 {
-	rdt_quirks();
 	rdt_alloc_capable = get_rdt_alloc_resources();
 	rdt_mon_capable = get_rdt_mon_resources();
 
 	return (rdt_mon_capable || rdt_alloc_capable);
 }
 
+static __init void rdt_init_res_defs_intel(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_intel;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_THRTL_BASE;
+			r->msr_update = mba_wrmsr_intel;
+			r->parse_ctrlval = parse_bw_intel;
+		}
+	}
+}
+
+static __init void rdt_init_res_defs_amd(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_amd;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_BW_BASE;
+			r->msr_update = mba_wrmsr_amd;
+			r->parse_ctrlval = parse_bw_amd;
+		}
+	}
+}
+
+static __init void rdt_init_res_defs(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		rdt_init_res_defs_intel();
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		rdt_init_res_defs_amd();
+}
+
 static enum cpuhp_state rdt_online;
 
-static int __init intel_rdt_late_init(void)
+static int __init resctrl_late_init(void)
 {
 	struct rdt_resource *r;
 	int state, ret;
 
+	/*
+	 * Initialize functions(or definitions) that are different
+	 * between vendors here.
+	 */
+	rdt_init_res_defs();
+
+	check_quirks();
+
 	if (!get_rdt_resources())
 		return -ENODEV;
 
 	rdt_init_padding();
 
 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
-				  "x86/rdt/cat:online:",
-				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
+				  "x86/resctrl/cat:online:",
+				  resctrl_online_cpu, resctrl_offline_cpu);
 	if (state < 0)
 		return state;
 
@@ -890,20 +996,20 @@ static int __init resctrl_late_init(void)
 	rdt_online = state;
 
 	for_each_alloc_capable_rdt_resource(r)
-		pr_info("Intel RDT %s allocation detected\n", r->name);
+		pr_info("%s allocation detected\n", r->name);
 
 	for_each_mon_capable_rdt_resource(r)
-		pr_info("Intel RDT %s monitoring detected\n", r->name);
+		pr_info("%s monitoring detected\n", r->name);
 
 	return 0;
 }
 
-late_initcall(intel_rdt_late_init);
+late_initcall(resctrl_late_init);
 
-static void __exit intel_rdt_exit(void)
+static void __exit resctrl_exit(void)
 {
 	cpuhp_remove_state(rdt_online);
 	rdtgroup_exit();
 }
 
-__exitcall(intel_rdt_exit);
+__exitcall(resctrl_exit);
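One detail worth spelling out from __rdt_get_mem_config_amd() earlier in this file: MAX_MBA_BW_AMD is 0x800, i.e. 2048, whose four decimal digits are what r->data_width = 4 accounts for. A trivial standalone check of that arithmetic:

    /* 0x800 == 2048: the widest AMD MBA value prints as 4 decimal digits. */
    #include <stdio.h>

    int main(void)
    {
            printf("%d\n", snprintf(NULL, 0, "%u", 0x800)); /* prints 4 */
            return 0;
    }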
arch/x86/kernel/cpu/resctrl/ctrlmondata.c (renamed from arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c)
@@ -27,7 +27,54 @@
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
-#include "intel_rdt.h"
+#include "internal.h"
+
+/*
+ * Check whether MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and maximum bandwidth values specified by
+ * the hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+			    struct rdt_resource *r)
+{
+	unsigned long bw;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &bw);
+	if (ret) {
+		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+		return false;
+	}
+
+	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+				    r->membw.min_bw, r->default_ctrl);
+		return false;
+	}
+
+	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
+	return true;
+}
+
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d)
+{
+	unsigned long bw_val;
+
+	if (d->have_new_ctrl) {
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+		return -EINVAL;
+	}
+
+	if (!bw_validate_amd(data->buf, &bw_val, r))
+		return -EINVAL;
+
+	d->new_ctrl = bw_val;
+	d->have_new_ctrl = true;
+
+	return 0;
+}
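parse_bw_amd() is fed by writes to the schemata file. An illustrative user-space write, assuming resctrl is mounted at /sys/fs/resctrl and a single memory-bandwidth domain 0 (on AMD the value is a bandwidth value in absolute units, not a percentage):

    /* Request 2048 bandwidth units for the default group on domain 0;
     * parse_bw_amd() validates and rounds the value. Illustrative only. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *line = "MB:0=2048\n";
            int fd = open("/sys/fs/resctrl/schemata", O_WRONLY);

            if (fd < 0) {
                    perror("open schemata");
                    return 1;
            }
            if (write(fd, line, strlen(line)) < 0)
                    perror("write schemata");
            close(fd);
            return 0;
    }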
 /*
  * Check whether MBA bandwidth percentage value is correct. The value is
@@ -65,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
 	return true;
 }
 
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
-	     struct rdt_domain *d)
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+		   struct rdt_domain *d)
 {
 	unsigned long bw_val;
 
 	if (d->have_new_ctrl) {
-		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
 		return -EINVAL;
 	}
 
@@ -89,7 +136,7 @@ int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
  * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
  * Additionally Haswell requires at least two bits set.
  */
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 {
 	unsigned long first_bit, zero_bit, val;
 	unsigned int cbm_len = r->cache.cbm_len;
@@ -97,12 +144,12 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 
 	ret = kstrtoul(buf, 16, &val);
 	if (ret) {
-		rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
+		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
 		return false;
 	}
 
 	if (val == 0 || val > r->default_ctrl) {
-		rdt_last_cmd_puts("mask out of range\n");
+		rdt_last_cmd_puts("Mask out of range\n");
 		return false;
 	}
 
@@ -110,12 +157,12 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
 
 	if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
-		rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
+		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
 		return false;
 	}
 
 	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
-		rdt_last_cmd_printf("Need at least %d bits in mask\n",
+		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
 				    r->cache.min_cbm_bits);
 		return false;
 	}
@@ -124,6 +171,30 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 	return true;
 }
 
+/*
+ * Check whether a cache bit mask is valid. AMD allows non-contiguous
+ * bitmasks
+ */
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret) {
+		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
+		return false;
+	}
+
+	if (val > r->default_ctrl) {
+		rdt_last_cmd_puts("Mask out of range\n");
+		return false;
+	}
+
+	*data = val;
+	return true;
+}
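The two validators differ exactly in the contiguity test: cbm_validate_intel() rejects masks with holes, while cbm_validate_amd() only range-checks. A standalone illustration of that test (hypothetical harness, not kernel code):

    /* 0x0f0f has a hole, so Intel's contiguity test fails it, while AMD's
     * validator would accept it. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool cbm_is_contiguous(unsigned long val)
    {
            /* Adding the lowest set bit carries through a contiguous run;
             * any bit of val left afterwards means there was a hole. */
            unsigned long low = val & -val;
            return val && (((val + low) & val) == 0);
    }

    int main(void)
    {
            printf("0x0ff0: %s\n", cbm_is_contiguous(0x0ff0) ? "ok" : "hole");
            printf("0x0f0f: %s\n", cbm_is_contiguous(0x0f0f) ? "ok" : "hole");
            return 0;
    }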
 /*
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
@@ -135,7 +206,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	u32 cbm_val;
 
 	if (d->have_new_ctrl) {
-		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
 		return -EINVAL;
 	}
 
@@ -145,17 +216,17 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	 */
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
 	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
-		rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
+		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
 		return -EINVAL;
 	}
 
-	if (!cbm_validate(data->buf, &cbm_val, r))
+	if (!r->cbm_validate(data->buf, &cbm_val, r))
 		return -EINVAL;
 
 	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
 	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
 	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
-		rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
 		return -EINVAL;
 	}
 
@@ -164,14 +235,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	 * either is exclusive.
 	 */
 	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
-		rdt_last_cmd_printf("overlaps with exclusive group\n");
+		rdt_last_cmd_puts("Overlaps with exclusive group\n");
 		return -EINVAL;
 	}
 
 	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
 		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
-			rdt_last_cmd_printf("overlaps with other group\n");
+			rdt_last_cmd_puts("Overlaps with other group\n");
 			return -EINVAL;
 		}
 	}
@@ -293,7 +364,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
 		if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
 			return parse_line(tok, r, rdtgrp);
 	}
-	rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
+	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
 	return -EINVAL;
 }
 
@@ -326,7 +397,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	 */
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("resource group is pseudo-locked\n");
+		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
 		goto out;
 	}
 
@@ -467,7 +538,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 
 	r = &rdt_resources_all[resid];
 	d = rdt_find_domain(r, domid, NULL);
-	if (!d) {
+	if (IS_ERR_OR_NULL(d)) {
 		ret = -ENOENT;
 		goto out;
 	}
arch/x86/kernel/cpu/resctrl/internal.h (renamed from arch/x86/kernel/cpu/intel_rdt.h)
@@ -1,20 +1,24 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_H
-#define _ASM_X86_INTEL_RDT_H
+#ifndef _ASM_X86_RESCTRL_INTERNAL_H
+#define _ASM_X86_RESCTRL_INTERNAL_H
 
 #include <linux/sched.h>
 #include <linux/kernfs.h>
 #include <linux/jump_label.h>
 
-#define IA32_L3_QOS_CFG		0xc81
-#define IA32_L2_QOS_CFG		0xc82
-#define IA32_L3_CBM_BASE	0xc90
-#define IA32_L2_CBM_BASE	0xd10
-#define IA32_MBA_THRTL_BASE	0xd50
+#define MSR_IA32_L3_QOS_CFG		0xc81
+#define MSR_IA32_L2_QOS_CFG		0xc82
+#define MSR_IA32_L3_CBM_BASE		0xc90
+#define MSR_IA32_L2_CBM_BASE		0xd10
+#define MSR_IA32_MBA_THRTL_BASE		0xd50
+#define MSR_IA32_MBA_BW_BASE		0xc0000200
 
-#define L3_QOS_CDP_ENABLE	0x01ULL
+#define MSR_IA32_QM_CTR			0x0c8e
+#define MSR_IA32_QM_EVTSEL		0x0c8d
 
-#define L2_QOS_CDP_ENABLE	0x01ULL
+#define L3_QOS_CDP_ENABLE		0x01ULL
+
+#define L2_QOS_CDP_ENABLE		0x01ULL
 
 /*
  * Event IDs are used to program IA32_QM_EVTSEL before reading event
@@ -29,6 +33,9 @@
 #define MBM_CNTR_WIDTH			24
 #define MBM_OVERFLOW_INTERVAL		1000
 #define MAX_MBA_BW			100u
+#define MBA_IS_LINEAR			0x4
+#define MBA_MAX_MBPS			U32_MAX
+#define MAX_MBA_BW_AMD			0x800
 
 #define RMID_VAL_ERROR			BIT_ULL(63)
 #define RMID_VAL_UNAVAIL		BIT_ULL(62)
@@ -69,7 +76,7 @@ struct rmid_read {
 	u64			val;
 };
 
-extern unsigned int intel_cqm_threshold;
+extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
@@ -391,9 +398,9 @@ struct rdt_parse_data {
  * struct rdt_resource - attributes of an RDT resource
  * @rid:		The index of the resource
  * @alloc_enabled:	Is allocation enabled on this machine
- * @mon_enabled:		Is monitoring enabled for this feature
+ * @mon_enabled:	Is monitoring enabled for this feature
  * @alloc_capable:	Is allocation available on this machine
- * @mon_capable:		Is monitor feature available on this machine
+ * @mon_capable:	Is monitor feature available on this machine
  * @name:		Name to use in "schemata" file
 * @num_closid:	Number of CLOSIDs available
 * @cache_level:	Which cache level defines scope of this resource
@@ -405,10 +412,11 @@ struct rdt_parse_data {
 * @cache:		Cache allocation related data
 * @format_str:	Per resource format string to show domain value
 * @parse_ctrlval:	Per resource function pointer to parse control values
- * @evt_list:		List of monitoring events
- * @num_rmid:		Number of RMIDs available
- * @mon_scale:		cqm counter * mon_scale = occupancy in bytes
- * @fflags:		flags to choose base and info files
+ * @cbm_validate	Cache bitmask validate function
+ * @evt_list:		List of monitoring events
+ * @num_rmid:		Number of RMIDs available
+ * @mon_scale:		cqm counter * mon_scale = occupancy in bytes
+ * @fflags:		flags to choose base and info files
 */
 struct rdt_resource {
 	int			rid;
@@ -431,6 +439,7 @@ struct rdt_resource {
 	int (*parse_ctrlval)(struct rdt_parse_data *data,
 			     struct rdt_resource *r,
 			     struct rdt_domain *d);
+	bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r);
 	struct list_head	evt_list;
 	int			num_rmid;
 	unsigned int		mon_scale;
@@ -439,8 +448,10 @@ struct rdt_resource {
 
 int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	      struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
-	     struct rdt_domain *d);
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+		   struct rdt_domain *d);
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
@@ -463,6 +474,10 @@ enum {
 	RDT_NUM_RESOURCES,
 };
 
+#define for_each_rdt_resource(r)					      \
+	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+	     r++)
+
 #define for_each_capable_rdt_resource(r)				      \
 	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
 	     r++)							      \
@@ -567,5 +582,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
 
-#endif /* _ASM_X86_INTEL_RDT_H */
+#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
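A reminder of how these bases are consumed: cat_wrmsr() and both mba_wrmsr_*() helpers index the per-CLOSID control registers from msr_base, so, purely as illustrative arithmetic:

    /* Control MSRs are indexed by CLOSID from the bases above, e.g. the
     * L3 CBM for CLOSID 2 lives at 0xc90 + 2 = 0xc92. Illustration only. */
    #define MSR_IA32_L3_CBM_BASE	0xc90

    static unsigned int l3_cbm_msr(unsigned int closid)
    {
            return MSR_IA32_L3_CBM_BASE + closid;
    }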
arch/x86/kernel/cpu/resctrl/monitor.c (renamed from arch/x86/kernel/cpu/intel_rdt_monitor.c)
@@ -26,10 +26,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <asm/cpu_device_id.h>
-#include "intel_rdt.h"
-
-#define MSR_IA32_QM_CTR		0x0c8e
-#define MSR_IA32_QM_EVTSEL	0x0c8d
+#include "internal.h"
 
 struct rmid_entry {
 	u32			rmid;
@@ -73,7 +70,7 @@ unsigned int rdt_mon_features;
  * This is the threshold cache occupancy at which we will consider an
  * RMID available for re-allocation.
  */
-unsigned int intel_cqm_threshold;
+unsigned int resctrl_cqm_threshold;
 
 static inline struct rmid_entry *__rmid_entry(u32 rmid)
 {
@@ -107,7 +104,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
 {
 	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 
-	return val >= intel_cqm_threshold;
+	return val >= resctrl_cqm_threshold;
 }
 
 /*
@@ -187,7 +184,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 	list_for_each_entry(d, &r->domains, list) {
 		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
 			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-			if (val <= intel_cqm_threshold)
+			if (val <= resctrl_cqm_threshold)
 				continue;
 		}
 
@@ -625,6 +622,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
+	unsigned int cl_size = boot_cpu_data.x86_cache_size;
 	int ret;
 
 	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
@@ -637,10 +635,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
 	 *
 	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
 	 */
-	intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 
 	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-	intel_cqm_threshold /= r->mon_scale;
+	resctrl_cqm_threshold /= r->mon_scale;
 
 	ret = dom_data_init(r);
 	if (ret)
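The comment's example can be checked by hand; a worked computation (x86_cache_size is in KB, so a 35MB LLC is 35840):

    /* 35840 KB * 1024 / 56 RMIDs = 655360 bytes = 640 KiB, which is
     * ~1.8% of the 35MB cache, before dividing by mon_scale. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int cl_size = 35 * 1024;	/* KB */
            unsigned int num_rmid = 56;
            unsigned int threshold = cl_size * 1024 / num_rmid;

            printf("%u bytes (%.1f%% of LLC)\n", threshold,
                   threshold * 100.0 / (cl_size * 1024.0));
            return 0;
    }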
arch/x86/kernel/cpu/resctrl/pseudo_lock.c (renamed from arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c)
@@ -24,14 +24,14 @@
 
 #include <asm/cacheflush.h>
 #include <asm/intel-family.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
 #include <asm/perf_event.h>
 
 #include "../../events/perf_event.h" /* For X86_CONFIG() */
-#include "intel_rdt.h"
+#include "internal.h"
 
 #define CREATE_TRACE_POINTS
-#include "intel_rdt_pseudo_lock_event.h"
+#include "pseudo_lock_event.h"
 
 /*
  * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
@@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
 	for_each_cpu(cpu, &plr->d->cpu_mask) {
 		pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
 		if (!pm_req) {
-			rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
+			rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
 			ret = -ENOMEM;
 			goto out_err;
 		}
@@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
 					     DEV_PM_QOS_RESUME_LATENCY,
 					     30);
 		if (ret < 0) {
-			rdt_last_cmd_printf("fail to add latency req cpu%d\n",
+			rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
 					    cpu);
 			kfree(pm_req);
 			ret = -1;
@@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 	plr->cpu = cpumask_first(&plr->d->cpu_mask);
 
 	if (!cpu_online(plr->cpu)) {
-		rdt_last_cmd_printf("cpu %u associated with cache not online\n",
+		rdt_last_cmd_printf("CPU %u associated with cache not online\n",
 				    plr->cpu);
 		ret = -ENODEV;
 		goto out_region;
@@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 	}
 
 	ret = -1;
-	rdt_last_cmd_puts("unable to determine cache line size\n");
+	rdt_last_cmd_puts("Unable to determine cache line size\n");
 out_region:
 	pseudo_lock_region_clear(plr);
 	return ret;
@@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
 	 * KMALLOC_MAX_SIZE.
 	 */
 	if (plr->size > KMALLOC_MAX_SIZE) {
-		rdt_last_cmd_puts("requested region exceeds maximum size\n");
+		rdt_last_cmd_puts("Requested region exceeds maximum size\n");
 		ret = -E2BIG;
 		goto out_region;
 	}
 
 	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
 	if (!plr->kmem) {
-		rdt_last_cmd_puts("unable to allocate memory\n");
+		rdt_last_cmd_puts("Unable to allocate memory\n");
 		ret = -ENOMEM;
 		goto out_region;
 	}
@@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
 	 * default closid associated with it.
 	 */
 	if (rdtgrp == &rdtgroup_default) {
-		rdt_last_cmd_puts("cannot pseudo-lock default group\n");
+		rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
 		return -EINVAL;
 	}
 
@@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
 	 */
 	prefetch_disable_bits = get_prefetch_disable_bits();
 	if (prefetch_disable_bits == 0) {
-		rdt_last_cmd_puts("pseudo-locking not supported\n");
+		rdt_last_cmd_puts("Pseudo-locking not supported\n");
 		return -EINVAL;
 	}
 
 	if (rdtgroup_monitor_in_progress(rdtgrp)) {
-		rdt_last_cmd_puts("monitoring in progress\n");
+		rdt_last_cmd_puts("Monitoring in progress\n");
 		return -EINVAL;
 	}
 
 	if (rdtgroup_tasks_assigned(rdtgrp)) {
-		rdt_last_cmd_puts("tasks assigned to resource group\n");
+		rdt_last_cmd_puts("Tasks assigned to resource group\n");
 		return -EINVAL;
 	}
 
@@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
 	}
 
 	if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
-		rdt_last_cmd_puts("unable to modify resctrl permissions\n");
+		rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
 		return -EIO;
 	}
 
 	ret = pseudo_lock_init(rdtgrp);
 	if (ret) {
-		rdt_last_cmd_puts("unable to init pseudo-lock region\n");
+		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
 		goto out_release;
 	}
 
@@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 	if (rdt_mon_capable) {
 		ret = alloc_rmid();
 		if (ret < 0) {
-			rdt_last_cmd_puts("out of RMIDs\n");
+			rdt_last_cmd_puts("Out of RMIDs\n");
 			return ret;
 		}
 		rdtgrp->mon.rmid = ret;
@@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 					"pseudo_lock/%u", plr->cpu);
 	if (IS_ERR(thread)) {
 		ret = PTR_ERR(thread);
-		rdt_last_cmd_printf("locking thread returned error %d\n", ret);
+		rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
 		goto out_cstates;
 	}
 
@@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 		 * the cleared, but not freed, plr struct resulting in an
 		 * empty pseudo-locking loop.
 		 */
-		rdt_last_cmd_puts("locking thread interrupted\n");
+		rdt_last_cmd_puts("Locking thread interrupted\n");
 		goto out_cstates;
 	}
 
 	ret = pseudo_lock_minor_get(&new_minor);
 	if (ret < 0) {
-		rdt_last_cmd_puts("unable to obtain a new minor number\n");
+		rdt_last_cmd_puts("Unable to obtain a new minor number\n");
 		goto out_cstates;
 	}
 
@@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 
 	if (IS_ERR(dev)) {
 		ret = PTR_ERR(dev);
-		rdt_last_cmd_printf("failed to create character device: %d\n",
+		rdt_last_cmd_printf("Failed to create character device: %d\n",
 				    ret);
 		goto out_debugfs;
 	}

arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h (renamed from arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h)
@@ -39,5 +39,5 @@ TRACE_EVENT(pseudo_lock_l3,
 
 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#define TRACE_INCLUDE_FILE pseudo_lock_event
 #include <trace/define_trace.h>
arch/x86/kernel/cpu/resctrl/rdtgroup.c (renamed from arch/x86/kernel/cpu/intel_rdt_rdtgroup.c)
@@ -35,8 +35,8 @@
 
 #include <uapi/linux/magic.h>
 
-#include <asm/intel_rdt_sched.h>
-#include "intel_rdt.h"
+#include <asm/resctrl_sched.h>
+#include "internal.h"
 
 DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
@@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 }
 
 /*
- * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * This is safe against resctrl_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
  * from update_closid_rmid() is proteced against __switch_to() because
  * preemption is disabled.
@@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info)
 	 * executing task might have its own closid selected. Just reuse
 	 * the context switch code.
 	 */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 }
 
 /*
@@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 	/* Check whether cpus belong to parent ctrl group */
 	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
 	if (cpumask_weight(tmpmask)) {
-		rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
+		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
 		return -EINVAL;
 	}
 
@@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	rdt_last_cmd_clear();
 	if (!rdtgrp) {
 		ret = -ENOENT;
-		rdt_last_cmd_puts("directory was removed\n");
+		rdt_last_cmd_puts("Directory was removed\n");
 		goto unlock;
 	}
 
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
 	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("pseudo-locking in progress\n");
+		rdt_last_cmd_puts("Pseudo-locking in progress\n");
 		goto unlock;
 	}
 
@@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	ret = cpumask_parse(buf, newmask);
 
 	if (ret) {
-		rdt_last_cmd_puts("bad cpu list/mask\n");
+		rdt_last_cmd_puts("Bad CPU list/mask\n");
 		goto unlock;
 	}
 
@@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
 	if (cpumask_weight(tmpmask)) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("can only assign online cpus\n");
+		rdt_last_cmd_puts("Can only assign online CPUs\n");
 		goto unlock;
 	}
 
@@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head)
 
 	preempt_disable();
 	/* update PQR_ASSOC MSR to make resource group go into effect */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 	preempt_enable();
 
 	kfree(callback);
@@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 		 */
 		atomic_dec(&rdtgrp->waitcount);
 		kfree(callback);
-		rdt_last_cmd_puts("task exited\n");
+		rdt_last_cmd_puts("Task exited\n");
 	} else {
 		/*
 		 * For ctrl_mon groups move both closid and rmid.
@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
 	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("pseudo-locking in progress\n");
+		rdt_last_cmd_puts("Pseudo-locking in progress\n");
 		goto unlock;
 	}
 
@@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
 {
 	struct rdt_resource *r = of->kn->parent->priv;
 
-	seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
+	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
 
 	return 0;
 }
@@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
 	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
 		return -EINVAL;
 
-	intel_cqm_threshold = bytes / r->mon_scale;
+	resctrl_cqm_threshold = bytes / r->mon_scale;
 
 	return nbytes;
 }
@@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
 	 * peer RDT CDP resource. Hence the WARN.
 	 */
 	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
-	if (WARN_ON(!_d_cdp)) {
+	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
 		_r_cdp = NULL;
 		ret = -EINVAL;
 	}
@@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
 		list_for_each_entry(d, &r->domains, list) {
 			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
 						  rdtgrp->closid, false)) {
-				rdt_last_cmd_puts("schemata overlaps\n");
+				rdt_last_cmd_puts("Schemata overlaps\n");
 				return false;
 			}
 		}
 	}
 
 	if (!has_cache) {
-		rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
+		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
 		return false;
 	}
 
@@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
 		goto out;
 
 	if (mode == RDT_MODE_PSEUDO_LOCKED) {
-		rdt_last_cmd_printf("cannot change pseudo-locked group\n");
+		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
 			goto out;
 		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
 	} else {
-		rdt_last_cmd_printf("unknown/unsupported mode\n");
+		rdt_last_cmd_puts("Unknown or unsupported mode\n");
 		ret = -EINVAL;
 	}
 
@@ -1722,14 +1722,14 @@ static void l3_qos_cfg_update(void *arg)
 {
 	bool *enable = arg;
 
-	wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
 }
 
 static void l2_qos_cfg_update(void *arg)
 {
 	bool *enable = arg;
 
-	wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
+	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
 }
 
 static inline bool is_mba_linear(void)
@@ -1878,7 +1878,10 @@ static int parse_rdtgroupfs_options(char *data)
 			if (ret)
 				goto out;
 		} else if (!strcmp(token, "mba_MBps")) {
-			ret = set_mba_sc(true);
+			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+				ret = set_mba_sc(true);
+			else
+				ret = -EINVAL;
 			if (ret)
 				goto out;
 		} else {
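The mba_MBps option enables the MBA software controller, which depends on Intel's MBM counters and linear delay scale, hence the vendor check above. A user-space sketch of requesting the option at mount time (illustrative only):

    /* After this change the kernel rejects the option (-EINVAL) on
     * anything but Intel CPUs. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "mba_MBps")) {
                    perror("mount -o mba_MBps");	/* expected to fail on AMD */
                    return 1;
            }
            return 0;
    }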
@@ -2540,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 			tmp_cbm = d->new_ctrl;
 			if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
 			    r->cache.min_cbm_bits) {
-				rdt_last_cmd_printf("no space on %s:%d\n",
+				rdt_last_cmd_printf("No space on %s:%d\n",
 						    r->name, d->id);
 				return -ENOSPC;
 			}
@@ -2557,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 			continue;
 		ret = update_domains(r, rdtgrp->closid);
 		if (ret < 0) {
-			rdt_last_cmd_puts("failed to initialize allocations\n");
+			rdt_last_cmd_puts("Failed to initialize allocations\n");
 			return ret;
 		}
 		rdtgrp->mode = RDT_MODE_SHAREABLE;
@@ -2580,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	rdt_last_cmd_clear();
 	if (!prdtgrp) {
 		ret = -ENODEV;
-		rdt_last_cmd_puts("directory was removed\n");
+		rdt_last_cmd_puts("Directory was removed\n");
 		goto out_unlock;
 	}
 
@@ -2588,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
 	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("pseudo-locking in progress\n");
+		rdt_last_cmd_puts("Pseudo-locking in progress\n");
 		goto out_unlock;
 	}
 
@@ -2596,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
 	if (!rdtgrp) {
 		ret = -ENOSPC;
-		rdt_last_cmd_puts("kernel out of memory\n");
+		rdt_last_cmd_puts("Kernel out of memory\n");
 		goto out_unlock;
 	}
 	*r = rdtgrp;
@@ -2637,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	if (rdt_mon_capable) {
 		ret = alloc_rmid();
 		if (ret < 0) {
-			rdt_last_cmd_puts("out of RMIDs\n");
+			rdt_last_cmd_puts("Out of RMIDs\n");
 			goto out_destroy;
 		}
 		rdtgrp->mon.rmid = ret;
@@ -2725,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 	kn = rdtgrp->kn;
 	ret = closid_alloc();
 	if (ret < 0) {
-		rdt_last_cmd_puts("out of CLOSIDs\n");
+		rdt_last_cmd_puts("Out of CLOSIDs\n");
 		goto out_common_fail;
 	}
 	closid = ret;
arch/x86/kernel/cpu/scattered.c
@@ -17,7 +17,11 @@ struct cpuid_bit {
 	u32 sub_leaf;
 };
 
-/* Please keep the leaf sorted by cpuid_bit.level for faster search. */
+/*
+ * Please keep the leaf sorted by cpuid_bit.level for faster search.
+ * X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID
+ * levels are different and there is a separate entry for each.
+ */
 static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_APERFMPERF,       CPUID_ECX,  0, 0x00000006, 0 },
 	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
@@ -29,6 +33,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
+	{ X86_FEATURE_MBA,              CPUID_EBX,  6, 0x80000008, 0 },
 	{ X86_FEATURE_SME,		CPUID_EAX,  0, 0x8000001f, 0 },
 	{ X86_FEATURE_SEV,		CPUID_EAX,  1, 0x8000001f, 0 },
 	{ 0, 0, 0, 0, 0 }
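The new entry maps AMD's enumeration of memory bandwidth allocation onto the same X86_FEATURE_MBA flag that Intel sets from CPUID leaf 0x10. A user-space probe of the AMD bit (GCC/Clang <cpuid.h>, x86 only, illustrative):

    /* Test CPUID.0x80000008:EBX[6], the bit the new scattered entry reads. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
                    return 1;

            printf("AMD MBA: %s\n", (ebx & (1u << 6)) ? "yes" : "no");
            return 0;
    }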
arch/x86/kernel/process_32.c
@@ -56,7 +56,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
 #include <asm/proto.h>
 
 #include "process.h"
@@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	this_cpu_write(current_task, next_p);
 
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 
 	return prev_p;
 }
arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
 #include <asm/switch_to.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/vdso.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
 #include <asm/unistd.h>
 #include <asm/fsgsbase.h>
 #ifdef CONFIG_IA32_EMULATION
@@ -622,7 +622,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	}
 
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 
 	return prev_p;
 }
include/linux/sched.h
@@ -993,7 +993,7 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
 	struct list_head		cg_list;
 #endif
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
 	u32				closid;
 	u32				rmid;
 #endif