/*
 * arch/parisc/kernel/topology.c
 *
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 *
 * based on arch/arm/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/cpu.h>

#include <asm/topology.h>
#include <asm/sections.h>
/* Per-CPU device structures handed to register_cpu() at boot. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * Set to 1 by store_cpu_topology() once two online CPUs are found sharing
 * the same hardware location; init_cpu_topology() then enables the
 * MC-aware scheduler topology.
 */
static int dualcores_found;
/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	struct cpuinfo_parisc *p;
	int max_socket = -1;
	unsigned long cpu;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	per_cpu(cpu_devices, cpuid).hotpluggable = 1;
#endif
	/* Expose this CPU to the driver core; failure is non-fatal. */
	if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
		pr_warn("Failed to register CPU%d device", cpuid);

	/* create cpu topology mapping */
	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = 0;

	p = &per_cpu(cpu_data, cpuid);
	for_each_online_cpu(cpu) {
		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

		if (cpu == cpuid) /* ignore current cpu */
			continue;

		/*
		 * A matching cpu_loc is treated as "same physical socket":
		 * inherit that CPU's package, take the next core_id in it,
		 * and remember that a multi-core package exists.
		 * NOTE(review): assumes a non-zero cpu_loc uniquely
		 * identifies a socket — confirm against PDC firmware docs.
		 */
		if (cpuinfo->cpu_loc == p->cpu_loc) {
			cpuid_topo->core_id = cpu_topology[cpu].core_id;
			if (p->cpu_loc) {
				cpuid_topo->core_id++;
				cpuid_topo->package_id = cpu_topology[cpu].package_id;
				dualcores_found = 1;
				continue;
			}
		}

		/* Track the highest package_id seen so far among other CPUs. */
		if (cpuid_topo->package_id == -1)
			max_socket = max(max_socket, cpu_topology[cpu].package_id);
	}

	/* No socket match found: allocate the next free package_id. */
	if (cpuid_topo->package_id == -1)
		cpuid_topo->package_id = max_socket + 1;

	update_siblings_masks(cpuid);

	pr_info("CPU%u: cpu core %d of socket %d\n",
			cpuid,
			cpu_topology[cpuid].core_id,
			cpu_topology[cpuid].package_id);
}
/*
 * Scheduler topology levels for parisc: an MC (multi-core / shared-socket)
 * level when CONFIG_SCHED_MC is enabled, followed by the per-die level.
 * Installed by init_cpu_topology() only when dual-core parts were found.
 */
static struct sched_domain_topology_level parisc_mc_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
/*
 * init_cpu_topology is called at boot when only one cpu is running
 * which prevent simultaneous write access to cpu_topology array
 */
void __init init_cpu_topology(void)
{
	/*
	 * Only install the MC-aware scheduler topology when
	 * store_cpu_topology() actually detected dual-core packages;
	 * otherwise keep the default topology.
	 */
	if (!dualcores_found)
		return;

	set_sched_topology(parisc_mc_topology);
}