/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;

static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

2008-04-17 09:46:12 +04:00
2010-08-31 12:28:18 +04:00
static struct mask_info core_info ;
2008-04-30 15:38:40 +04:00
cpumask_t cpu_core_map [ NR_CPUS ] ;
2010-05-17 12:00:12 +04:00
unsigned char cpu_core_id [ NR_CPUS ] ;
2008-04-30 15:38:40 +04:00
2010-08-31 12:28:18 +04:00
# ifdef CONFIG_SCHED_BOOK
static struct mask_info book_info ;
cpumask_t cpu_book_map [ NR_CPUS ] ;
unsigned char cpu_book_id [ NR_CPUS ] ;
# endif
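/*
 * Return the cpumask of the group starting at @info that contains @cpu.
 * If topology is disabled or @cpu is in none of the groups, fall back
 * to a mask containing only @cpu itself.
 */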
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}

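/*
 * Add all logical cpus described by one topology cpu entry to the given
 * book and core masks and record their ids and polarization. On z10 each
 * core entry carries a single cpu, so the core pointer advances per cpu.
 */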
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
					  int z10)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
	     cpu < TOPOLOGY_CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (cpu_logical_map(lcpu) != rcpu)
				continue;
#ifdef CONFIG_SCHED_BOOK
			cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
#endif
			cpumask_set_cpu(lcpu, &core->mask);
			if (z10) {
				cpu_core_id[lcpu] = rcpu;
				core = core->next;
			} else {
				cpu_core_id[lcpu] = core->id;
			}
			smp_cpu_polarization[lcpu] = tl_cpu->pp;
		}
	}
	return core;
}

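/* Clear all core (and book) masks before they are rebuilt. */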
static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
#ifdef CONFIG_SCHED_BOOK
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
#endif
}

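/*
 * Step to the next topology list entry; cpu and container entries
 * have different sizes.
 */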
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

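/*
 * Convert the SYSIB 15.1.x topology list into book and core masks.
 * Nesting level 2 describes books, level 1 cores and level 0 cpus;
 * z10 machines report only two levels (books and cpus).
 */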
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	struct mask_info *book = &book_info;
	struct cpuid cpu_id;
#else
	struct mask_info *book = NULL;
#endif
	struct mask_info *core = &core_info;
	union topology_entry *tle, *end;
	int z10 = 0;

#ifdef CONFIG_SCHED_BOOK
	get_cpu_id(&cpu_id);
	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
#endif
	spin_lock_irq(&topology_lock);
	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
#ifdef CONFIG_SCHED_BOOK
		if (z10) {
			switch (tle->nl) {
			case 1:
				book = book->next;
				book->id = tle->container.id;
				break;
			case 0:
				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
				break;
			default:
				clear_masks();
				goto out;
			}
			tle = next_tle(tle);
			continue;
		}
#endif
		switch (tle->nl) {
#ifdef CONFIG_SCHED_BOOK
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
#endif
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core, z10);
			break;
		default:
			clear_masks();
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

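/*
 * Issue the perform topology function (PTF) instruction with
 * function code @fc and return its condition code.
 */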
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

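/*
 * Switch the machine between horizontal and vertical cpu polarization
 * and mark the polarization of all cpus as unknown until the next
 * topology update.
 */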
int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

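/* Recompute the per-cpu core (and book) sibling maps under topology_lock. */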
static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
#ifdef CONFIG_SCHED_BOOK
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
#endif
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

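/*
 * Fetch SYSIB 15.1.3 when book scheduling is enabled and fall back to
 * SYSIB 15.1.2 if that nesting level is not supported.
 */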
void store_topology(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
#endif
	stsi(info, 15, 1, 2);
}

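/*
 * Rebuild the topology masks and notify sysfs of the changed cpu devices.
 * Returns 1 when the machine provides topology information, 0 when only
 * the simple fallback was applied.
 */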
int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

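/*
 * Poll for pending topology changes with PTF_CHECK once a minute and
 * schedule an update when one is reported.
 */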
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

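/* "topology=off" on the kernel command line disables topology support. */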
static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);

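/*
 * Allocate one mask_info per container that can exist at the given
 * nesting level, based on the magnitude fields of the SYSIB.
 */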
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

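/*
 * Early boot setup: allocate the SYSIB buffer and the core/book mask
 * lists and print the reported topology magnitudes.
 */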
void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);
	alloc_masks(info, &core_info, 1);
#ifdef CONFIG_SCHED_BOOK
	alloc_masks(info, &book_info, 2);
#endif
}