/*
 *	Copyright IBM Corp. 2007, 2011
 *	Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>
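
/*
 * Function codes for the perform topology function (PTF) instruction:
 * request horizontal or vertical polarization, or check whether a
 * topology change report is pending.
 */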
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
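
/*
 * Each topology nesting level (core, book) is described by a linked
 * list of mask_info nodes: one node per container, holding the
 * container id and the cpumask of all logical CPUs it contains.
 */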
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);
2008-04-17 07:46:12 +02:00
2010-08-31 10:28:18 +02:00
static struct mask_info core_info ;
2008-04-30 13:38:40 +02:00
cpumask_t cpu_core_map [ NR_CPUS ] ;
2010-05-17 10:00:12 +02:00
unsigned char cpu_core_id [ NR_CPUS ] ;
2008-04-30 13:38:40 +02:00
2010-08-31 10:28:18 +02:00
static struct mask_info book_info ;
cpumask_t cpu_book_map [ NR_CPUS ] ;
unsigned char cpu_book_id [ NR_CPUS ] ;
2011-12-27 11:27:09 +01:00
/* smp_cpu_state_mutex must be held when accessing this array */
int cpu_polarization [ NR_CPUS ] ;
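
/*
 * Return the cpumask of the group that contains @cpu on the given
 * topology level, or a mask containing just @cpu if topology support
 * is disabled or the CPU is found in none of the groups.
 */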
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}
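
/*
 * Add all CPUs of one topology list CPU entry to the book and core
 * masks. The CPU addresses in the entry are machine wide and are
 * translated to logical CPU numbers via smp_find_processor_id().
 */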
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
					  int one_core_per_cpu)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
	     cpu < TOPOLOGY_CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
	{
		unsigned int rcpu;
		int lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		lcpu = smp_find_processor_id(rcpu);
		if (lcpu >= 0) {
			cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
			cpumask_set_cpu(lcpu, &core->mask);
			if (one_core_per_cpu) {
				cpu_core_id[lcpu] = rcpu;
				core = core->next;
			} else {
				cpu_core_id[lcpu] = core->id;
			}
			cpu_set_polarization(lcpu, tl_cpu->pp);
		}
	}
	return core;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}
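
/*
 * Advance to the next topology list entry. Entries have different
 * sizes: nesting level 0 denotes a CPU entry, anything else a
 * container entry.
 */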
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}
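
/*
 * Rebuild the book and core masks from a topology list. In the generic
 * layout, books are containers at nesting level 2 and cores containers
 * at nesting level 1.
 */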
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}
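
/*
 * On z10 each CPU entry describes a single core, so books are the
 * containers at nesting level 1 and add_cpus_to_mask() advances the
 * core pointer for every CPU it adds.
 */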
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}
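
/*
 * Machine types 0x2097 and 0x2098 are z10 machines, which need the
 * one-core-per-CPU-entry variant of the topology list parser.
 */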
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	spin_lock_irq(&topology_lock);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_cores_z10(info);
		break;
	default:
		__tl_to_cores_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}
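
/*
 * Execute the PTF instruction with function code @fc and return the
 * resulting condition code. A nonzero value means the polarization
 * request was not accepted or, for PTF_CHECK, that a topology change
 * report is pending.
 */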
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}
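
/*
 * Store the topology information (SYSIB 15.1.x) with the deepest
 * supported nesting level: selector 3 if the machine provides at
 * least three nesting levels, selector 2 otherwise.
 */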
void store_topology(struct sysinfo_15_1_x *info)
{
	if (topology_max_mnest >= 3)
		stsi(info, 15, 1, 3);
	else
		stsi(info, 15, 1, 2);
}
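
/*
 * Rebuild the topology masks and send a change uevent for every online
 * CPU. Returns 1 if the scheduler should rebuild its domains.
 */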
int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}
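
/*
 * The topology timer is deferrable and normally fires once a minute;
 * after topology_expect_change() it polls every 100 ms until
 * topology_poll has counted down to zero.
 */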
static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);
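
/*
 * Allocate one mask_info node for every container that can exist at
 * the given nesting level; the counts are taken from the magnitude
 * (mag) array of the SYSIB.
 */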
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &core_info, 1);
	alloc_masks(info, &book_info, 2);
}
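
/*
 * The "dispatching" sysfs attribute selects the CPU polarization:
 * writing 1 requests vertical polarization, 0 horizontal.
 */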
static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (cpu_read_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static int __init topology_init(void)
{
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	set_topology_timer();
out:
	update_cpu_core_map();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);