x86/mtrr: Remove physical address size calculation
The physical address width calculation in mtrr_bp_init() can easily be
replaced by using the already available value x86_phys_bits from struct
cpuinfo_x86.

The same information source can be used in mtrr/cleanup.c, removing the
need to pass that value on to mtrr_cleanup().

In print_mtrr_state() use x86_phys_bits instead of recalculating it from
size_or_mask.

Move the setting of size_or_mask and size_and_mask into a dedicated new
function in mtrr/generic.c, allowing those two variables to be made
static, as they are now used in generic.c only.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20230502120931.20719-2-jgross@suse.com
commit f6b980646b
parent ac9a78681b
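The refactoring hinges on the masks being derivable directly from
boot_cpu_data.x86_phys_bits. Below is a minimal user-space sketch (not
kernel code) that checks the old SIZE_OR_MASK_BITS()/0xfffff00000ULL
formulation removed from mtrr.c against the new GENMASK_ULL() based one
introduced in mtrr_set_mask() for a few example address widths; the
assumed PAGE_SHIFT of 12, the sample widths, and the open-coded
GENMASK_ULL() are assumptions made purely for illustration.

```c
/*
 * Illustrative user-space sketch only: verify that the old and new ways
 * of computing size_or_mask/size_and_mask agree for sample physical
 * address widths. GENMASK_ULL() is open-coded to mirror <linux/bits.h>.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed 4K pages */
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Old formulation, as removed from mtrr.c by this patch. */
#define SIZE_OR_MASK_BITS(n)	(~((1ULL << ((n) - PAGE_SHIFT)) - 1))

int main(void)
{
	/* Example values that x86_phys_bits might hold. */
	const unsigned int widths[] = { 32, 36, 40, 48, 52 };

	for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		unsigned int phys_addr = widths[i];

		/* Old: per-vendor setup scattered over mtrr_bp_init(). */
		uint64_t old_or  = SIZE_OR_MASK_BITS(phys_addr);
		uint64_t old_and = ~old_or & 0xfffff00000ULL;

		/* New: centralized in mtrr_set_mask() in mtrr/generic.c. */
		uint64_t new_or  = ~GENMASK_ULL(phys_addr - PAGE_SHIFT - 1, 0);
		uint64_t new_and = ~new_or & GENMASK_ULL(39, 20);

		assert(old_or == new_or && old_and == new_and);
		printf("phys_bits=%2u size_or_mask=%#018llx size_and_mask=%#012llx\n",
		       phys_addr, (unsigned long long)new_or,
		       (unsigned long long)new_and);
	}
	return 0;
}
```

Since the masks no longer depend on a separately computed width, the
address_bits parameter threaded through mtrr_cleanup() and
set_var_mtrr_all() becomes unnecessary, which is what the hunks below
remove.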
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -173,7 +173,7 @@ early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
 
 static void __init
 set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
-	     unsigned char type, unsigned int address_bits)
+	     unsigned char type)
 {
 	u32 base_lo, base_hi, mask_lo, mask_hi;
 	u64 base, mask;
@@ -183,7 +183,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
 		return;
 	}
 
-	mask = (1ULL << address_bits) - 1;
+	mask = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 	mask &= ~((((u64)sizek) << 10) - 1);
 
 	base = ((u64)basek) << 10;
@@ -209,7 +209,7 @@ save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
 	range_state[reg].type = type;
 }
 
-static void __init set_var_mtrr_all(unsigned int address_bits)
+static void __init set_var_mtrr_all(void)
 {
 	unsigned long basek, sizek;
 	unsigned char type;
@@ -220,7 +220,7 @@ static void __init set_var_mtrr_all(unsigned int address_bits)
 		sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
 		type = range_state[reg].type;
 
-		set_var_mtrr(reg, basek, sizek, type, address_bits);
+		set_var_mtrr(reg, basek, sizek, type);
 	}
 }
 
@@ -680,7 +680,7 @@ static int __init mtrr_search_optimal_index(void)
 	return index_good;
 }
 
-int __init mtrr_cleanup(unsigned address_bits)
+int __init mtrr_cleanup(void)
 {
 	unsigned long x_remove_base, x_remove_size;
 	unsigned long base, size, def, dummy;
@@ -742,7 +742,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 		mtrr_print_out_one_result(i);
 
 		if (!result[i].bad) {
-			set_var_mtrr_all(address_bits);
+			set_var_mtrr_all();
 			pr_debug("New variable MTRRs\n");
 			print_out_mtrr_range_state();
 			return 1;
@@ -786,7 +786,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 		gran_size = result[i].gran_sizek;
 		gran_size <<= 10;
 		x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
-		set_var_mtrr_all(address_bits);
+		set_var_mtrr_all();
 		pr_debug("New variable MTRRs\n");
 		print_out_mtrr_range_state();
 		return 1;
@@ -802,7 +802,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 	return 0;
 }
 #else
-int __init mtrr_cleanup(unsigned address_bits)
+int __init mtrr_cleanup(void)
 {
 	return 0;
 }
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -38,6 +38,16 @@ u64 mtrr_tom2;
 struct mtrr_state_type mtrr_state;
 EXPORT_SYMBOL_GPL(mtrr_state);
 
+static u64 size_or_mask, size_and_mask;
+
+void __init mtrr_set_mask(void)
+{
+	unsigned int phys_addr = boot_cpu_data.x86_phys_bits;
+
+	size_or_mask = ~GENMASK_ULL(phys_addr - PAGE_SHIFT - 1, 0);
+	size_and_mask = ~size_or_mask & GENMASK_ULL(39, 20);
+}
+
 /*
  * BIOS is expected to clear MtrrFixDramModEn bit, see for example
  * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
@@ -422,7 +432,7 @@ static void __init print_mtrr_state(void)
 	}
 	pr_debug("MTRR variable ranges %sabled:\n",
 		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
-	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
+	high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4;
 
 	for (i = 0; i < num_var_ranges; ++i) {
 		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -67,8 +67,6 @@ static bool mtrr_enabled(void)
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
-u64 size_or_mask, size_and_mask;
-
 const struct mtrr_ops *mtrr_if;
 
 /* Returns non-zero if we have the write-combining memory type */
@@ -619,77 +617,34 @@ static struct syscore_ops mtrr_syscore_ops = {
 
 int __initdata changed_by_mtrr_cleanup;
 
-#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
 /**
- * mtrr_bp_init - initialize mtrrs on the boot CPU
+ * mtrr_bp_init - initialize MTRRs on the boot CPU
  *
  * This needs to be called early; before any of the other CPUs are
  * initialized (i.e. before smp_init()).
- *
  */
 void __init mtrr_bp_init(void)
 {
 	const char *why = "(not available)";
-	u32 phys_addr;
 
-	phys_addr = 32;
+	mtrr_set_mask();
 
-	if (boot_cpu_has(X86_FEATURE_MTRR)) {
+	if (cpu_feature_enabled(X86_FEATURE_MTRR)) {
 		mtrr_if = &generic_mtrr_ops;
-		size_or_mask = SIZE_OR_MASK_BITS(36);
-		size_and_mask = 0x00f00000;
-		phys_addr = 36;
-
-		/*
-		 * This is an AMD specific MSR, but we assume(hope?) that
-		 * Intel will implement it too when they extend the address
-		 * bus of the Xeon.
-		 */
-		if (cpuid_eax(0x80000000) >= 0x80000008) {
-			phys_addr = cpuid_eax(0x80000008) & 0xff;
-			/* CPUID workaround for Intel 0F33/0F34 CPU */
-			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-			    boot_cpu_data.x86 == 0xF &&
-			    boot_cpu_data.x86_model == 0x3 &&
-			    (boot_cpu_data.x86_stepping == 0x3 ||
-			     boot_cpu_data.x86_stepping == 0x4))
-				phys_addr = 36;
-
-			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
-			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
-		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
-			   boot_cpu_data.x86 == 6) {
-			/*
-			 * VIA C* family have Intel style MTRRs,
-			 * but don't support PAE
-			 */
-			size_or_mask = SIZE_OR_MASK_BITS(32);
-			size_and_mask = 0;
-			phys_addr = 32;
-		}
 	} else {
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_AMD:
-			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
-				/* Pre-Athlon (K6) AMD CPU MTRRs */
+			/* Pre-Athlon (K6) AMD CPU MTRRs */
+			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
 				mtrr_if = &amd_mtrr_ops;
-				size_or_mask = SIZE_OR_MASK_BITS(32);
-				size_and_mask = 0;
-			}
 			break;
 		case X86_VENDOR_CENTAUR:
-			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
+			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
 				mtrr_if = &centaur_mtrr_ops;
-				size_or_mask = SIZE_OR_MASK_BITS(32);
-				size_and_mask = 0;
-			}
 			break;
 		case X86_VENDOR_CYRIX:
-			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
+			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
 				mtrr_if = &cyrix_mtrr_ops;
-				size_or_mask = SIZE_OR_MASK_BITS(32);
-				size_and_mask = 0;
-			}
 			break;
 		default:
 			break;
@@ -703,7 +658,7 @@ void __init mtrr_bp_init(void)
 	/* BIOS may override */
 	if (get_mtrr_state()) {
 		memory_caching_control |= CACHE_MTRR;
-		changed_by_mtrr_cleanup = mtrr_cleanup(phys_addr);
+		changed_by_mtrr_cleanup = mtrr_cleanup();
 	} else {
 		mtrr_if = NULL;
 		why = "by BIOS";
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -51,7 +51,6 @@ void fill_mtrr_var_range(unsigned int index,
 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
 bool get_mtrr_state(void);
 
-extern u64 size_or_mask, size_and_mask;
 extern const struct mtrr_ops *mtrr_if;
 
 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
@@ -60,6 +59,7 @@ extern unsigned int num_var_ranges;
 extern u64 mtrr_tom2;
 extern struct mtrr_state_type mtrr_state;
 
+void mtrr_set_mask(void);
 void mtrr_state_warn(void);
 const char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
@@ -70,4 +70,4 @@ extern const struct mtrr_ops cyrix_mtrr_ops;
 extern const struct mtrr_ops centaur_mtrr_ops;
 
 extern int changed_by_mtrr_cleanup;
-extern int mtrr_cleanup(unsigned address_bits);
+extern int mtrr_cleanup(void);