/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

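/*
 * Assemble the event-select MSR value for one counter: enable the counter
 * interrupt, apply the user/kernel filters and unit mask, merge the
 * sanitized "extra" bits and encode the event number (including any
 * extended event bits above bit 7).
 */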
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;

	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (u64)(event & 0x0F00) << 24;

	return val;
}
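
/*
 * Per-CPU NMI handler: while counters are running, hand the registers to
 * the model so it can account overflowed counters; once profiling is torn
 * down, silence the counters instead. NMI_DONE is returned only when
 * oprofile is not enabled at all.
 */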
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
	if (ctr_running)
		model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
	else if (!nmi_enabled)
		return NMI_DONE;
	else
		model->stop(&__get_cpu_var(cpu_msrs));
	return NMI_HANDLED;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}
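
/*
 * Start/stop helpers: the nmi_cpu_* variants run on a single CPU, while
 * nmi_start()/nmi_stop() fan them out to all online CPUs with CPU hotplug
 * excluded via get_online_cpus()/put_online_cpus().
 */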
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}

static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->stop(msrs);
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}
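
/*
 * Event multiplexing: with CONFIG_OPROFILE_EVENT_MULTIPLEX the model may
 * expose more virtual counters than there are physical ones. switch_index
 * tracks which set of virtual counters is currently mapped onto the
 * hardware, and nmi_cpu_switch() rotates through the sets.
 */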
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}

static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}

static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
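
/*
 * Rotate this CPU to the next set of virtual counters: stop the hardware,
 * save the current set, advance switch_index (wrapping at the first unused
 * virtual counter), reprogram the controls and restart.
 */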
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif
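
/*
 * Per-CPU MSR shadow buffers: allocate_msrs() reserves counter/control save
 * areas (and the multiplex area, if configured) for every possible CPU;
 * free_msrs() releases them again.
 */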
static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}

static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}
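
/*
 * Per-CPU setup, run via IPI: save the current MSR contents, let the model
 * program the counters (under oprofilefs_lock), and redirect the local APIC
 * performance-counter interrupt to NMI delivery.
 */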
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_save_registers(msrs);
	raw_spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	raw_spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}

static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/*
	 * Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector number combination can be illegal.
	 * That's by design: at power on the APIC LVT entries contain a
	 * zero vector number, which is legal only for NMI delivery mode.
	 * So mask APIC errors before restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}

static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}
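
/*
 * Create one oprofilefs directory per (virtual) counter with the files
 * user space uses to configure it.
 */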
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/*
		 * Quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect the userspace app.
		 * NOTE: assumes a 1:1 mapping here (that counters are
		 * organized sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
	}

	return 0;
}

static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
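
/*
 * Bring profiling up: allocate and fill the per-CPU MSR shadows, register
 * the NMI handler and the CPU hotplug notifier, then set up every online
 * CPU.
 */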
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/*
	 * We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
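
/*
 * Tear profiling down again: shut down every CPU, unhook the hotplug
 * notifier and the NMI handler, then let the model clean up and release
 * the MSR shadows.
 */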
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();
	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
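
/*
 * Suspend/resume support: by the time the syscore callbacks run only one
 * CPU is left, so stopping/starting that single CPU is sufficient.
 */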
#ifdef CONFIG_PM

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
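
/*
 * "cpu_type" module parameter: lets the user force NMI timer mode or the
 * architectural perfmon interface instead of the auto-detected model.
 */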
enum __force_cpu_type {
	reserved = 0,		/* do not force */
	timer,
	arch_perfmon,
};

static int force_cpu_type;

static int set_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "timer")) {
		force_cpu_type = timer;
		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
	} else if (!strcmp(str, "arch_perfmon")) {
		force_cpu_type = arch_perfmon;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	} else {
		force_cpu_type = 0;
	}

	return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
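
/*
 * Select the model and cpu_type string for P6-class (family 6) Intel CPUs
 * based on the CPU model number.
 */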
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
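
/*
 * Entry point: detect the CPU, pick the matching op_x86_model_spec and wire
 * the generic NMI callbacks into oprofile_operations.
 */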
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	exit_suspend_resume();
}