x86/mm/numa: Use core domain size on AMD
cpuinfo::topo::x86_coreid_bits is about to be phased out. Use the core
domain size from the topology information.

Add a comment why the early MPTABLE parsing is required and decrapify
the loop which sets the APIC ID to node map.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Michael Kelley <mhklinux@outlook.com>
Tested-by: Zhang Rui <rui.zhang@intel.com>
Tested-by: Wang Wendy <wendy.wang@intel.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20240212153625.270320718@linutronix.de
commit d805a69160
parent 3279081dd0
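The decrapified loop hands each parsed node a block of "cores" consecutive APIC IDs starting at the boot CPU's APIC ID, instead of reconstructing every ID from the coreid bits. The following is a minimal userspace sketch of the two schemes, not kernel code; the node count, coreid bits and BSP APIC ID are made-up illustration values, and it assumes contiguous node IDs starting at 0. Under those assumptions both variants produce the same apicid to node table when the core domain size equals 1 << x86_coreid_bits:

/*
 * Illustrative userspace sketch only -- not kernel code. It mimics the
 * old and the reworked apicid -> node assignment so the equivalence is
 * easy to see. All constants below are hypothetical.
 */
#include <stdio.h>

#define NODES           2       /* hypothetical number of parsed NUMA nodes */
#define COREID_BITS     3       /* hypothetical x86_coreid_bits */
#define CORES           (1u << COREID_BITS)
#define BSP_APICID      0u      /* hypothetical boot CPU APIC ID */

int main(void)
{
        unsigned int i, j, apicid;

        /* Old scheme: rebuild each APIC ID from the coreid bits */
        printf("old:");
        for (i = 0; i < NODES; i++)
                for (j = BSP_APICID; j < CORES + BSP_APICID; j++)
                        printf(" %u->%u", (i << COREID_BITS) + j, i);
        printf("\n");

        /* New scheme: hand out CORES consecutive APIC IDs per node */
        printf("new:");
        apicid = BSP_APICID;
        for (i = 0; i < NODES; i++)
                for (j = 0; j < CORES; j++, apicid++)
                        printf(" %u->%u", apicid, i);
        printf("\n");
        return 0;
}

Both loops print the same table for these assumed values; the difference is that the reworked form no longer needs x86_coreid_bits at all.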
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -121,6 +121,11 @@ struct x86_topology_system {
 
 extern struct x86_topology_system x86_topo_system;
 
+static inline unsigned int topology_get_domain_size(enum x86_topology_domains dom)
+{
+	return x86_topo_system.dom_size[dom];
+}
+
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern const struct cpumask *cpu_clustergroup_mask(int cpu);
 
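The new topology_get_domain_size() helper is a plain accessor into x86_topo_system, which the early topology evaluation fills in; amd_numa_init() below uses the TOPO_CORE_DOMAIN entry as the number of APIC IDs to assign per node. A self-contained stand-in sketch of the accessor pattern follows; the enum contents, struct layout and example sizes are simplified stand-ins for illustration, not the real kernel definitions:

/* Stand-in sketch, not the kernel header: types stubbed for illustration. */
#include <stdio.h>

enum x86_topology_domains {             /* abbreviated stand-in enum */
        TOPO_SMT_DOMAIN,
        TOPO_CORE_DOMAIN,
        TOPO_MAX_DOMAIN
};

struct x86_topology_system {
        unsigned int dom_size[TOPO_MAX_DOMAIN]; /* per-domain size in APIC ID space */
};

/* Hypothetical instance; the kernel populates this during early boot. */
static struct x86_topology_system x86_topo_system = {
        .dom_size = { [TOPO_SMT_DOMAIN] = 1, [TOPO_CORE_DOMAIN] = 8 },
};

static inline unsigned int topology_get_domain_size(enum x86_topology_domains dom)
{
        return x86_topo_system.dom_size[dom];
}

int main(void)
{
        /* amd_numa_init() sizes each node's APIC ID block like this: */
        unsigned int cores = topology_get_domain_size(TOPO_CORE_DOMAIN);

        printf("APIC IDs assigned per node: %u\n", cores);
        return 0;
}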
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -54,13 +54,11 @@ static __init int find_northbridge(void)
 
 int __init amd_numa_init(void)
 {
-	u64 start = PFN_PHYS(0);
+	unsigned int numnodes, cores, apicid;
+	u64 prevbase, start = PFN_PHYS(0);
 	u64 end = PFN_PHYS(max_pfn);
-	unsigned numnodes;
-	u64 prevbase;
-	int i, j, nb;
 	u32 nodeid, reg;
-	unsigned int bits, cores, apicid_base;
+	int i, j, nb;
 
 	if (!early_pci_allowed())
 		return -EINVAL;
@@ -158,26 +156,25 @@ int __init amd_numa_init(void)
 		return -ENOENT;
 
 	/*
 	 * We seem to have valid NUMA configuration. Map apicids to nodes
-	 * using the coreid bits from early_identify_cpu.
+	 * using the size of the core domain in the APIC space.
 	 */
-	bits = boot_cpu_data.x86_coreid_bits;
-	cores = 1 << bits;
-	apicid_base = 0;
+	cores = topology_get_domain_size(TOPO_CORE_DOMAIN);
 
 	/*
-	 * get boot-time SMP configuration:
+	 * Scan MPTABLE to map the local APIC and ensure that the boot CPU
+	 * APIC ID is valid. This is required because on pre ACPI/SRAT
+	 * systems IO-APICs are mapped before the boot CPU.
 	 */
 	early_get_smp_config();
 
-	if (boot_cpu_physical_apicid > 0) {
-		pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid);
-		apicid_base = boot_cpu_physical_apicid;
-	}
+	apicid = boot_cpu_physical_apicid;
+	if (apicid > 0)
+		pr_info("BSP APIC ID: %02x\n", apicid);
 
-	for_each_node_mask(i, numa_nodes_parsed)
-		for (j = apicid_base; j < cores + apicid_base; j++)
-			set_apicid_to_node((i << bits) + j, i);
-
+	for_each_node_mask(i, numa_nodes_parsed) {
+		for (j = 0; j < cores; j++, apicid++)
+			set_apicid_to_node(apicid, i);
+	}
 	return 0;
 }