// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * ia64 kernel NUMA specific stuff
 *
 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *	Jesse Barnes <jbarnes@sgi.com>
 */
#include <linux/topology.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/smp.h>

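/*
 * cpu_to_node_map[] records the node id of each logical cpu;
 * node_to_cpu_mask[] is the inverse mapping, one cpumask per node.
 */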
u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);

cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
EXPORT_SYMBOL(node_to_cpu_mask);
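
/**
 * map_cpu_to_node - record which node a cpu belongs to
 * @cpu: logical cpu number
 * @nid: node id, or a negative value to reset the mapping to node 0
 *
 * Updates cpu_to_node_map[] and node_to_cpu_mask[] for @cpu.  If @nid is
 * not online, the cpu is mapped to the first online node instead.
 */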
void map_cpu_to_node(int cpu, int nid)
{
	int oldnid;

	if (nid < 0) { /* just initialize to node 0 */
		cpu_to_node_map[cpu] = 0;
		return;
	}
	/* sanity check first */
	oldnid = cpu_to_node_map[cpu];
	if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
		return;	/* nothing to do */
	}
	/*
	 * We don't have cpu-driven node hot add yet...  In the usual
	 * case the node is created from the SRAT at boot time.
	 */
	if (!node_online(nid))
		nid = first_online_node;
	cpu_to_node_map[cpu] = nid;
	cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
	return;
}
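
/**
 * unmap_cpu_from_node - remove a cpu from its node's bookkeeping
 * @cpu: logical cpu number
 * @nid: node id the cpu is currently mapped to
 *
 * Clears @cpu from node_to_cpu_mask[@nid] and resets cpu_to_node_map[@cpu]
 * to 0, warning if the existing mapping does not match @nid.
 */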
void unmap_cpu_from_node(int cpu, int nid)
{
	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
	WARN_ON(cpu_to_node_map[cpu] != nid);
	cpu_to_node_map[cpu] = 0;
	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
}

/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		cpumask_clear(&node_to_cpu_mask[node]);

	for_each_possible_early_cpu(cpu) {
		node = NUMA_NO_NODE;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		map_cpu_to_node(cpu, node);
	}
}