x86/mtrr: Replace use_intel() with a local flag
In the MTRR code, use_intel() is used in only one source file, and the
relevant use_intel_if member of struct mtrr_ops is set only in
generic_mtrr_ops.

Replace use_intel() with a single flag in cacheinfo.c which can be set
when assigning generic_mtrr_ops to mtrr_if. This allows dropping
use_intel_if from mtrr_ops, while preparing to decouple PAT from MTRR.

As another preparation for the PAT/MTRR decoupling, use one bit for MTRR
control and one for PAT control. For now both bits are set together;
this can be changed later.

As the new flag will be set only if mtrr_enabled is set, the test for
mtrr_enabled can be dropped in some places.

  [ bp: Massage commit message. ]

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-4-jgross@suse.com
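For illustration, the pattern this patch establishes looks roughly like the
sketch below: callers test bits in memory_caching_control instead of calling
use_intel(). The function demo_restore_caching() is hypothetical and merely
stands in for real consumers such as mtrr_bp_restore():

    #include <asm/cacheinfo.h>  /* memory_caching_control, CACHE_MTRR, CACHE_PAT */

    /*
     * Hypothetical consumer: do nothing when the firmware or a hypervisor
     * owns the caching MSRs (flag is zero), otherwise act only on the MSRs
     * the kernel actually controls.
     */
    static void demo_restore_caching(void)
    {
            if (!memory_caching_control)
                    return;

            if (memory_caching_control & CACHE_MTRR) {
                    /* ... reprogram the MTRRs here ... */
            }

            if (memory_caching_control & CACHE_PAT) {
                    /* ... reprogram the PAT MSR here ... */
            }
    }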
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -2,6 +2,11 @@
 #ifndef _ASM_X86_CACHEINFO_H
 #define _ASM_X86_CACHEINFO_H
 
+/* Kernel controls MTRR and/or PAT MSRs. */
+extern unsigned int memory_caching_control;
+#define CACHE_MTRR 0x01
+#define CACHE_PAT  0x02
+
 void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
 void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
 
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 /* Shared L2 cache maps */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
 
+/* Kernel controls MTRR and/or PAT MSRs. */
+unsigned int memory_caching_control __ro_after_init;
+
 struct _cache_table {
 	unsigned char descriptor;
 	char cache_type;
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -917,7 +917,6 @@ int positive_have_wrcomb(void)
  * Generic structure...
  */
const struct mtrr_ops generic_mtrr_ops = {
-	.use_intel_if		= 1,
 	.set_all		= generic_set_all,
 	.get			= generic_get_mtrr,
 	.get_free_region	= generic_get_free_region,
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -46,6 +46,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/rcupdate.h>
 
+#include <asm/cacheinfo.h>
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
 #include <asm/mtrr.h>
@@ -119,11 +120,11 @@ static int have_wrcomb(void)
 }
 
 /* This function returns the number of variable MTRRs */
-static void __init set_num_var_ranges(void)
+static void __init set_num_var_ranges(bool use_generic)
 {
 	unsigned long config = 0, dummy;
 
-	if (use_intel())
+	if (use_generic)
 		rdmsr(MSR_MTRRcap, config, dummy);
 	else if (is_cpu(AMD) || is_cpu(HYGON))
 		config = 2;
@@ -756,14 +757,16 @@ void __init mtrr_bp_init(void)
 
 	if (mtrr_if) {
 		__mtrr_enabled = true;
-		set_num_var_ranges();
+		set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
 		init_table();
-		if (use_intel()) {
+		if (mtrr_if == &generic_mtrr_ops) {
 			/* BIOS may override */
 			__mtrr_enabled = get_mtrr_state();
 
-			if (mtrr_enabled())
+			if (mtrr_enabled()) {
 				mtrr_bp_pat_init();
+				memory_caching_control |= CACHE_MTRR | CACHE_PAT;
+			}
 
 			if (mtrr_cleanup(phys_addr)) {
 				changed_by_mtrr_cleanup = 1;
@@ -786,10 +789,7 @@ void __init mtrr_bp_init(void)
 
 void mtrr_ap_init(void)
 {
-	if (!mtrr_enabled())
-		return;
-
-	if (!use_intel() || mtrr_aps_delayed_init)
+	if (!memory_caching_control || mtrr_aps_delayed_init)
 		return;
 
 	/*
@@ -825,9 +825,7 @@ void mtrr_save_state(void)
 
 void set_mtrr_aps_delayed_init(void)
 {
-	if (!mtrr_enabled())
-		return;
-	if (!use_intel())
+	if (!memory_caching_control)
 		return;
 
 	mtrr_aps_delayed_init = true;
@@ -838,7 +836,7 @@ void set_mtrr_aps_delayed_init(void)
  */
 void mtrr_aps_init(void)
 {
-	if (!use_intel() || !mtrr_enabled())
+	if (!memory_caching_control)
 		return;
 
 	/*
@@ -855,7 +853,7 @@ void mtrr_aps_init(void)
 
 void mtrr_bp_restore(void)
 {
-	if (!use_intel() || !mtrr_enabled())
+	if (!memory_caching_control)
 		return;
 
 	mtrr_if->set_all();
@@ -866,7 +864,7 @@ static int __init mtrr_init_finialize(void)
 	if (!mtrr_enabled())
 		return 0;
 
-	if (use_intel()) {
+	if (memory_caching_control & CACHE_MTRR) {
 		if (!changed_by_mtrr_cleanup)
 			mtrr_state_warn();
 		return 0;
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -14,7 +14,6 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 
 struct mtrr_ops {
 	u32	vendor;
-	u32	use_intel_if;
 	void	(*set)(unsigned int reg, unsigned long base,
 		       unsigned long size, mtrr_type type);
 	void	(*set_all)(void);
@@ -61,7 +60,6 @@ extern u64 size_or_mask, size_and_mask;
 extern const struct mtrr_ops *mtrr_if;
 
 #define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
 
 extern unsigned int num_var_ranges;
 extern u64 mtrr_tom2;