/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
2008-12-25 13:39:50 +01:00
# define KMSG_COMPONENT "cpu"
# define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
2011-12-27 11:27:09 +01:00
# include <linux/workqueue.h>
2016-12-03 09:50:21 +01:00
# include <linux/bootmem.h>
2011-12-27 11:27:09 +01:00
# include <linux/cpuset.h>
# include <linux/device.h>
2013-01-07 13:58:38 +01:00
# include <linux/export.h>
2011-12-27 11:27:09 +01:00
# include <linux/kernel.h>
2008-04-17 07:46:12 +02:00
# include <linux/sched.h>
2017-02-01 16:36:40 +01:00
# include <linux/sched/topology.h>
2011-12-27 11:27:09 +01:00
# include <linux/delay.h>
2015-02-04 14:21:31 +01:00
# include <linux/init.h>
# include <linux/slab.h>
2008-04-17 07:46:12 +02:00
# include <linux/cpu.h>
# include <linux/smp.h>
2011-12-27 11:27:09 +01:00
# include <linux/mm.h>
2014-03-06 18:25:13 +01:00
# include <linux/nodemask.h>
# include <linux/node.h>
2012-09-03 14:11:32 +02:00
# include <asm/sysinfo.h>
2014-03-06 18:25:13 +01:00
# include <asm/numa.h>
2008-04-17 07:46:12 +02:00
2008-04-17 07:46:13 +02:00
/* Function codes for the PTF (perform topology function) instruction. */
# define PTF_HORIZONTAL (0UL)
# define PTF_VERTICAL (1UL)
# define PTF_CHECK (2UL)

/* How the scheduler topology masks are built; see cpu_group_map(). */
enum {
TOPOLOGY_MODE_HW ,		/* use the machine-provided topology info */
TOPOLOGY_MODE_SINGLE ,		/* every CPU is its own package */
TOPOLOGY_MODE_PACKAGE ,		/* all present CPUs form one package */
TOPOLOGY_MODE_UNINITIALIZED
} ;

/* One container (drawer/book/socket) node; chained via ->next. */
struct mask_info {
struct mask_info * next ;
unsigned char id ;		/* container id as reported by the machine */
cpumask_t mask ;		/* CPUs belonging to this container */
} ;

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED ;
static void set_topology_timer ( void ) ;
static void topology_work_fn ( struct work_struct * work ) ;
/* Buffer for the SYSIB 15.1.x topology information block. */
static struct sysinfo_15_1_x * tl_info ;

static DECLARE_WORK ( topology_work , topology_work_fn ) ;

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info ;
static struct mask_info book_info ;
static struct mask_info drawer_info ;

struct cpu_topology_s390 cpu_topology [ NR_CPUS ] ;
EXPORT_SYMBOL_GPL ( cpu_topology ) ;

/* CPUs for which topology information was provided by the machine. */
cpumask_t cpus_with_topology ;
2010-08-31 10:28:18 +02:00
static cpumask_t cpu_group_map ( struct mask_info * info , unsigned int cpu )
2008-04-17 07:46:12 +02:00
{
cpumask_t mask ;
2012-11-12 10:03:25 +01:00
cpumask_copy ( & mask , cpumask_of ( cpu ) ) ;
2017-09-19 12:52:22 +02:00
switch ( topology_mode ) {
case TOPOLOGY_MODE_HW :
while ( info ) {
if ( cpumask_test_cpu ( cpu , & info - > mask ) ) {
mask = info - > mask ;
break ;
}
info = info - > next ;
}
if ( cpumask_empty ( & mask ) )
cpumask_copy ( & mask , cpumask_of ( cpu ) ) ;
break ;
case TOPOLOGY_MODE_PACKAGE :
cpumask_copy ( & mask , cpu_present_mask ) ;
break ;
default :
/* fallthrough */
case TOPOLOGY_MODE_SINGLE :
cpumask_copy ( & mask , cpumask_of ( cpu ) ) ;
break ;
2010-10-29 16:50:38 +02:00
}
2008-04-17 07:46:12 +02:00
return mask ;
}
2015-01-14 17:52:10 +01:00
static cpumask_t cpu_thread_map ( unsigned int cpu )
{
cpumask_t mask ;
int i ;
cpumask_copy ( & mask , cpumask_of ( cpu ) ) ;
2017-09-19 12:52:22 +02:00
if ( topology_mode ! = TOPOLOGY_MODE_HW )
2015-01-14 17:52:10 +01:00
return mask ;
cpu - = cpu % ( smp_cpu_mtid + 1 ) ;
for ( i = 0 ; i < = smp_cpu_mtid ; i + + )
if ( cpu_present ( cpu + i ) )
cpumask_set_cpu ( cpu + i , & mask ) ;
return mask ;
}
2017-03-09 10:02:28 +01:00
# define TOPOLOGY_CORE_BITS 64
2016-05-25 09:53:07 +02:00
static void add_cpus_to_mask ( struct topology_core * tl_core ,
struct mask_info * drawer ,
struct mask_info * book ,
struct mask_info * socket )
2008-04-17 07:46:12 +02:00
{
2015-10-15 13:40:55 +02:00
struct cpu_topology_s390 * topo ;
2015-01-14 17:52:10 +01:00
unsigned int core ;
2008-04-17 07:46:12 +02:00
2017-03-09 10:02:28 +01:00
for_each_set_bit ( core , & tl_core - > mask , TOPOLOGY_CORE_BITS ) {
2015-01-14 17:52:10 +01:00
unsigned int rcore ;
int lcpu , i ;
2008-04-17 07:46:12 +02:00
2015-01-14 17:52:10 +01:00
rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core - > origin ;
lcpu = smp_find_processor_id ( rcore < < smp_cpu_mt_shift ) ;
2012-11-12 10:03:25 +01:00
if ( lcpu < 0 )
continue ;
2015-01-14 17:52:10 +01:00
for ( i = 0 ; i < = smp_cpu_mtid ; i + + ) {
2016-12-02 10:38:37 +01:00
topo = & cpu_topology [ lcpu + i ] ;
2016-05-25 10:25:50 +02:00
topo - > drawer_id = drawer - > id ;
2015-10-15 13:40:55 +02:00
topo - > book_id = book - > id ;
2016-05-25 09:53:07 +02:00
topo - > socket_id = socket - > id ;
2015-10-15 13:40:55 +02:00
topo - > core_id = rcore ;
topo - > thread_id = lcpu + i ;
2016-05-25 10:25:50 +02:00
cpumask_set_cpu ( lcpu + i , & drawer - > mask ) ;
2015-01-14 17:52:10 +01:00
cpumask_set_cpu ( lcpu + i , & book - > mask ) ;
cpumask_set_cpu ( lcpu + i , & socket - > mask ) ;
2016-12-03 09:50:21 +01:00
cpumask_set_cpu ( lcpu + i , & cpus_with_topology ) ;
2015-01-14 17:52:10 +01:00
smp_cpu_set_polarization ( lcpu + i , tl_core - > pp ) ;
2008-04-17 07:46:12 +02:00
}
}
}
2010-08-31 10:28:18 +02:00
static void clear_masks ( void )
2008-04-17 07:46:12 +02:00
{
2010-08-31 10:28:18 +02:00
struct mask_info * info ;
2008-04-17 07:46:12 +02:00
2012-11-12 10:03:25 +01:00
info = & socket_info ;
2010-08-31 10:28:18 +02:00
while ( info ) {
2011-05-23 10:24:36 +02:00
cpumask_clear ( & info - > mask ) ;
2010-08-31 10:28:18 +02:00
info = info - > next ;
}
info = & book_info ;
while ( info ) {
2011-05-23 10:24:36 +02:00
cpumask_clear ( & info - > mask ) ;
2010-08-31 10:28:18 +02:00
info = info - > next ;
2008-04-17 07:46:12 +02:00
}
2016-05-25 10:25:50 +02:00
info = & drawer_info ;
while ( info ) {
cpumask_clear ( & info - > mask ) ;
info = info - > next ;
}
2008-04-17 07:46:12 +02:00
}
2010-10-25 16:10:53 +02:00
static union topology_entry * next_tle ( union topology_entry * tle )
2008-04-17 07:46:12 +02:00
{
2010-10-25 16:10:53 +02:00
if ( ! tle - > nl )
2015-01-14 17:52:10 +01:00
return ( union topology_entry * ) ( ( struct topology_core * ) tle + 1 ) ;
2010-10-25 16:10:53 +02:00
return ( union topology_entry * ) ( ( struct topology_container * ) tle + 1 ) ;
2008-04-17 07:46:12 +02:00
}
2016-05-25 09:53:07 +02:00
static void tl_to_masks ( struct sysinfo_15_1_x * info )
2008-04-17 07:46:12 +02:00
{
2012-11-12 10:03:25 +01:00
struct mask_info * socket = & socket_info ;
2011-12-27 11:27:09 +01:00
struct mask_info * book = & book_info ;
2016-05-25 10:25:50 +02:00
struct mask_info * drawer = & drawer_info ;
2010-10-25 16:10:53 +02:00
union topology_entry * tle , * end ;
2010-08-31 10:28:18 +02:00
2016-05-25 09:53:07 +02:00
clear_masks ( ) ;
2008-04-17 07:46:13 +02:00
tle = info - > tle ;
2010-10-25 16:10:53 +02:00
end = ( union topology_entry * ) ( ( unsigned long ) info + info - > length ) ;
2008-04-17 07:46:12 +02:00
while ( tle < end ) {
switch ( tle - > nl ) {
2016-05-25 10:25:50 +02:00
case 3 :
drawer = drawer - > next ;
drawer - > id = tle - > container . id ;
break ;
2008-04-17 07:46:12 +02:00
case 2 :
2010-08-31 10:28:18 +02:00
book = book - > next ;
book - > id = tle - > container . id ;
2008-04-17 07:46:12 +02:00
break ;
case 1 :
2012-11-12 10:03:25 +01:00
socket = socket - > next ;
socket - > id = tle - > container . id ;
2008-04-17 07:46:12 +02:00
break ;
case 0 :
2016-05-25 09:53:07 +02:00
add_cpus_to_mask ( & tle - > cpu , drawer , book , socket ) ;
2011-12-27 11:27:12 +01:00
break ;
default :
clear_masks ( ) ;
return ;
}
tle = next_tle ( tle ) ;
}
}
2008-04-17 07:46:13 +02:00
static void topology_update_polarization_simple ( void )
{
int cpu ;
mutex_lock ( & smp_cpu_state_mutex ) ;
2008-12-25 13:37:57 +01:00
for_each_possible_cpu ( cpu )
2012-09-04 17:36:16 +02:00
smp_cpu_set_polarization ( cpu , POLARIZATION_HRZ ) ;
2008-04-17 07:46:13 +02:00
mutex_unlock ( & smp_cpu_state_mutex ) ;
}
static int ptf ( unsigned long fc )
2008-04-17 07:46:12 +02:00
{
int rc ;
asm volatile (
" .insn rre,0xb9a20000,%1,%1 \n "
" ipm %0 \n "
" srl %0,28 \n "
: " =d " ( rc )
2008-04-17 07:46:13 +02:00
: " d " ( fc ) : " cc " ) ;
return rc ;
}
int topology_set_cpu_management ( int fc )
{
2011-12-27 11:27:09 +01:00
int cpu , rc ;
2008-04-17 07:46:13 +02:00
2010-10-25 16:10:52 +02:00
if ( ! MACHINE_HAS_TOPOLOGY )
2008-04-17 07:46:13 +02:00
return - EOPNOTSUPP ;
if ( fc )
rc = ptf ( PTF_VERTICAL ) ;
else
rc = ptf ( PTF_HORIZONTAL ) ;
if ( rc )
return - EBUSY ;
2008-12-25 13:37:57 +01:00
for_each_possible_cpu ( cpu )
2012-09-04 17:36:16 +02:00
smp_cpu_set_polarization ( cpu , POLARIZATION_UNKNOWN ) ;
2008-04-17 07:46:12 +02:00
return rc ;
}
2012-11-12 10:03:25 +01:00
static void update_cpu_masks ( void )
2008-04-30 13:38:40 +02:00
{
2015-10-15 13:40:55 +02:00
struct cpu_topology_s390 * topo ;
2017-09-19 12:52:22 +02:00
int cpu , id ;
2008-04-30 13:38:40 +02:00
2010-08-31 10:28:18 +02:00
for_each_possible_cpu ( cpu ) {
2016-12-02 10:38:37 +01:00
topo = & cpu_topology [ cpu ] ;
2015-10-15 13:40:55 +02:00
topo - > thread_mask = cpu_thread_map ( cpu ) ;
topo - > core_mask = cpu_group_map ( & socket_info , cpu ) ;
topo - > book_mask = cpu_group_map ( & book_info , cpu ) ;
2016-05-25 10:25:50 +02:00
topo - > drawer_mask = cpu_group_map ( & drawer_info , cpu ) ;
2017-09-19 12:52:22 +02:00
if ( topology_mode ! = TOPOLOGY_MODE_HW ) {
id = topology_mode = = TOPOLOGY_MODE_PACKAGE ? 0 : cpu ;
2015-10-15 13:40:55 +02:00
topo - > thread_id = cpu ;
topo - > core_id = cpu ;
2017-09-19 12:52:22 +02:00
topo - > socket_id = id ;
topo - > book_id = id ;
topo - > drawer_id = id ;
2016-12-03 09:50:21 +01:00
if ( cpu_present ( cpu ) )
cpumask_set_cpu ( cpu , & cpus_with_topology ) ;
2012-11-12 10:03:25 +01:00
}
2010-08-31 10:28:18 +02:00
}
2014-03-06 18:25:13 +01:00
numa_update_cpu_topology ( ) ;
2010-08-31 10:28:18 +02:00
}
2010-10-25 16:10:54 +02:00
/* Fetch SYSIB 15.1.x at the deepest supported nesting level into @info. */
void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}
2016-12-03 09:50:21 +01:00
static int __arch_update_cpu_topology ( void )
2008-04-17 07:46:12 +02:00
{
2010-10-25 16:10:53 +02:00
struct sysinfo_15_1_x * info = tl_info ;
2016-12-03 09:50:21 +01:00
int rc = 0 ;
2008-04-17 07:46:12 +02:00
2016-12-03 09:50:21 +01:00
cpumask_clear ( & cpus_with_topology ) ;
2014-03-06 18:25:13 +01:00
if ( MACHINE_HAS_TOPOLOGY ) {
rc = 1 ;
store_topology ( info ) ;
tl_to_masks ( info ) ;
2008-04-17 07:46:13 +02:00
}
2012-11-12 10:03:25 +01:00
update_cpu_masks ( ) ;
2014-03-06 18:25:13 +01:00
if ( ! MACHINE_HAS_TOPOLOGY )
topology_update_polarization_simple ( ) ;
2016-12-03 09:50:21 +01:00
return rc ;
}
int arch_update_cpu_topology ( void )
{
struct device * dev ;
int cpu , rc ;
rc = __arch_update_cpu_topology ( ) ;
2008-04-17 07:46:12 +02:00
for_each_online_cpu ( cpu ) {
2011-12-21 14:29:42 -08:00
dev = get_cpu_device ( cpu ) ;
kobject_uevent ( & dev - > kobj , KOBJ_CHANGE ) ;
2008-04-17 07:46:12 +02:00
}
2014-03-06 18:25:13 +01:00
return rc ;
2008-04-17 07:46:12 +02:00
}
2008-04-30 13:38:41 +02:00
/* Deferred work: rebuilding sched domains must not run in timer context. */
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
2008-04-17 07:46:13 +02:00
/* Queue an asynchronous topology/sched-domain update. */
void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
2008-04-17 07:46:12 +02:00
static void topology_timer_fn ( unsigned long ignored )
{
2008-04-17 07:46:13 +02:00
if ( ptf ( PTF_CHECK ) )
topology_schedule_update ( ) ;
2008-04-17 07:46:12 +02:00
set_topology_timer ( ) ;
}
2011-12-27 11:27:16 +01:00
/*
 * Deferrable poll timer; topology_poll counts the remaining number of
 * fast (100ms) polls after a change was announced.
 */
static struct timer_list topology_timer =
TIMER_DEFERRED_INITIALIZER ( topology_timer_fn , 0 , 0 ) ;
static atomic_t topology_poll = ATOMIC_INIT ( 0 ) ;
2008-04-17 07:46:12 +02:00
static void set_topology_timer ( void )
{
2011-12-27 11:27:16 +01:00
if ( atomic_add_unless ( & topology_poll , - 1 , 0 ) )
mod_timer ( & topology_timer , jiffies + HZ / 10 ) ;
else
mod_timer ( & topology_timer , jiffies + HZ * 60 ) ;
}
/* Announce an upcoming topology change and switch to fast polling. */
void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/*
	 * This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}
2011-12-27 11:27:09 +01:00
static int cpu_management ;
2012-01-09 08:11:13 -08:00
static ssize_t dispatching_show ( struct device * dev ,
struct device_attribute * attr ,
2011-12-27 11:27:09 +01:00
char * buf )
{
ssize_t count ;
mutex_lock ( & smp_cpu_state_mutex ) ;
count = sprintf ( buf , " %d \n " , cpu_management ) ;
mutex_unlock ( & smp_cpu_state_mutex ) ;
return count ;
}
2012-01-09 08:11:13 -08:00
static ssize_t dispatching_store ( struct device * dev ,
struct device_attribute * attr ,
2011-12-27 11:27:09 +01:00
const char * buf ,
size_t count )
{
int val , rc ;
char delim ;
if ( sscanf ( buf , " %d %c " , & val , & delim ) ! = 1 )
return - EINVAL ;
if ( val ! = 0 & & val ! = 1 )
return - EINVAL ;
rc = 0 ;
get_online_cpus ( ) ;
mutex_lock ( & smp_cpu_state_mutex ) ;
if ( cpu_management = = val )
goto out ;
rc = topology_set_cpu_management ( val ) ;
2011-12-27 11:27:16 +01:00
if ( rc )
goto out ;
cpu_management = val ;
topology_expect_change ( ) ;
2011-12-27 11:27:09 +01:00
out :
mutex_unlock ( & smp_cpu_state_mutex ) ;
put_online_cpus ( ) ;
return rc ? rc : count ;
}
2012-01-09 08:11:13 -08:00
static DEVICE_ATTR ( dispatching , 0644 , dispatching_show ,
2011-12-27 11:27:09 +01:00
dispatching_store ) ;
2012-01-09 08:11:13 -08:00
static ssize_t cpu_polarization_show ( struct device * dev ,
struct device_attribute * attr , char * buf )
2011-12-27 11:27:09 +01:00
{
int cpu = dev - > id ;
ssize_t count ;
mutex_lock ( & smp_cpu_state_mutex ) ;
2012-09-04 17:36:16 +02:00
switch ( smp_cpu_get_polarization ( cpu ) ) {
2011-12-27 11:27:09 +01:00
case POLARIZATION_HRZ :
count = sprintf ( buf , " horizontal \n " ) ;
break ;
case POLARIZATION_VL :
count = sprintf ( buf , " vertical:low \n " ) ;
break ;
case POLARIZATION_VM :
count = sprintf ( buf , " vertical:medium \n " ) ;
break ;
case POLARIZATION_VH :
count = sprintf ( buf , " vertical:high \n " ) ;
break ;
default :
count = sprintf ( buf , " unknown \n " ) ;
break ;
}
mutex_unlock ( & smp_cpu_state_mutex ) ;
return count ;
}
2012-01-09 08:11:13 -08:00
static DEVICE_ATTR ( polarization , 0444 , cpu_polarization_show , NULL ) ;
2011-12-27 11:27:09 +01:00
/* Per-cpu sysfs attribute group (cpuN/polarization). */
static struct attribute * topology_cpu_attrs [ ] = {
& dev_attr_polarization . attr ,
NULL ,
} ;
static struct attribute_group topology_cpu_attr_group = {
. attrs = topology_cpu_attrs ,
} ;
int topology_cpu_init ( struct cpu * cpu )
{
2012-01-09 08:11:13 -08:00
return sysfs_create_group ( & cpu - > dev . kobj , & topology_cpu_attr_group ) ;
2011-12-27 11:27:09 +01:00
}
2015-03-16 12:44:10 +01:00
static const struct cpumask * cpu_thread_mask ( int cpu )
2015-01-14 17:52:10 +01:00
{
2016-12-02 10:38:37 +01:00
return & cpu_topology [ cpu ] . thread_mask ;
2015-01-14 17:52:10 +01:00
}
2014-04-11 11:44:38 +02:00
const struct cpumask * cpu_coregroup_mask ( int cpu )
{
2016-12-02 10:38:37 +01:00
return & cpu_topology [ cpu ] . core_mask ;
2014-04-11 11:44:38 +02:00
}
static const struct cpumask * cpu_book_mask ( int cpu )
{
2016-12-02 10:38:37 +01:00
return & cpu_topology [ cpu ] . book_mask ;
2014-04-11 11:44:38 +02:00
}
2016-05-25 10:25:50 +02:00
static const struct cpumask * cpu_drawer_mask ( int cpu )
{
2016-12-02 10:38:37 +01:00
return & cpu_topology [ cpu ] . drawer_mask ;
2016-05-25 10:25:50 +02:00
}
2014-04-11 11:44:38 +02:00
/* Scheduler domain hierarchy: SMT -> MC -> BOOK -> DRAWER -> DIE. */
static struct sched_domain_topology_level s390_topology [ ] = {
{ cpu_thread_mask , cpu_smt_flags , SD_INIT_NAME ( SMT ) } ,
{ cpu_coregroup_mask , cpu_core_flags , SD_INIT_NAME ( MC ) } ,
{ cpu_book_mask , SD_INIT_NAME ( BOOK ) } ,
{ cpu_drawer_mask , SD_INIT_NAME ( DRAWER ) } ,
{ cpu_cpu_mask , SD_INIT_NAME ( DIE ) } ,
{ NULL , } ,
} ;
2015-02-04 14:21:31 +01:00
/*
 * Allocate the chained mask_info list for one container level.
 * @offset selects the level in info->mag[] (1 = socket, 2 = book,
 * 3 = drawer); the number of nodes is the product of the magnitudes
 * of all deeper levels.
 */
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
		mask = mask->next;
	}
}
2016-12-03 09:50:21 +01:00
/*
 * Early boot setup: pick the topology mode, allocate the SYSIB buffer
 * and container lists, and do the first topology evaluation.
 */
void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	/* Keep a mode already chosen via the "topology=" parameter. */
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED)
		topology_mode = MACHINE_HAS_TOPOLOGY ?
			TOPOLOGY_MODE_HW : TOPOLOGY_MODE_SINGLE;
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
}
2017-09-19 12:52:22 +02:00
/* Map the "topology=" boot parameter value to a topology mode. */
static inline int topology_get_mode(int enabled)
{
	if (enabled)
		return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
	return TOPOLOGY_MODE_SINGLE;
}
/* Parse the "topology=" early boot parameter (on/off). */
static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);
2011-12-27 11:27:09 +01:00
/*
 * Late init: start topology-change polling (or set simple polarization
 * when unsupported) and create the "dispatching" sysfs attribute.
 */
static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);