/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2008 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

/* common functions */

u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (event & 0x0F00) << 24;

	return val;
}
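
/*
 * Worked example (hypothetical config, not taken from this file): event
 * 0x3c, unit_mask 0 and user = kernel = 1 on a model without an
 * event_mask yield
 *
 *	val = ARCH_PERFMON_EVENTSEL_INT | ARCH_PERFMON_EVENTSEL_USR |
 *	      ARCH_PERFMON_EVENTSEL_OS | 0x3c;
 *
 * i.e. bits 0-7 hold the event select, bits 8-15 the unit mask, bit 16
 * USR, bit 17 OS and bit 20 INT; bits 8-11 of a wider event number are
 * moved into bits 32-35 of the control value by the final shift.
 */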

static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
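
/*
 * Returning NOTIFY_STOP above marks the NMI as handled, so the rest of
 * the die notifier chain (including the unknown-NMI path) is skipped;
 * NOTIFY_DONE lets NMIs that are not ours propagate to other handlers.
 */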

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}

static void nmi_save_registers(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_save_registers(msrs);
}

static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}

static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters) {
			success = 0;
			break;
		}
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}

static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = 2
};
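
/*
 * The priority of 2 places this notifier ahead of default (priority 0)
 * entries on the die chain, so profiling NMIs reach oprofile before
 * other NMI handlers see them.
 */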

static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	err = register_die_notifier(&profile_exceptions_nb);
	if (err) {
		free_msrs();
		return err;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (cpu != 0) {
			memcpy(per_cpu(cpu_msrs, cpu).counters,
			       per_cpu(cpu_msrs, 0).counters,
			       sizeof(struct op_msr) * model->num_counters);
			memcpy(per_cpu(cpu_msrs, cpu).controls,
			       per_cpu(cpu_msrs, 0).controls,
			       sizeof(struct op_msr) * model->num_controls);
		}
	}
	on_each_cpu(nmi_save_registers, NULL, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}
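
/*
 * Note that the save pass runs before the setup pass, so that
 * nmi_cpu_shutdown() can later put every counter and control MSR back
 * to its pre-profiling contents via nmi_restore_registers().
 */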

static void nmi_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}

static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power-on the APIC LVT contains a zero vector nr, which is legal only
	 * for NMI delivery mode. So inhibit apic errors before restoring the
	 * lvtpc.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_restore_registers(msrs);
}

static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 1);
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}

struct op_counter_config counter_config[OP_MAX_COUNTER];

static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace apps.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}
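
/*
 * Sketch of the resulting layout (assuming oprofilefs is mounted at its
 * usual /dev/oprofile): one directory per available counter, e.g.
 *
 *	/dev/oprofile/0/enabled
 *	/dev/oprofile/0/event
 *	/dev/oprofile/0/count
 *	/dev/oprofile/0/unit_mask
 *	/dev/oprofile/0/kernel
 *	/dev/oprofile/0/user
 *
 * which the userspace oprofile daemon writes to configure each counter.
 */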

#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
#endif

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	.name		= "oprofile",
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (!error)
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "archperfmon")) {
		force_arch_perfmon = 1;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	}

	return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
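
/*
 * Usage sketch: booting with oprofile.cpu_type=archperfmon (or loading
 * the module with cpu_type=archperfmon) forces the generic architectural
 * perfmon driver even when a model-specific driver is available.
 */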

static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec const *spec = &op_ppro_spec;	/* default */

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 15: case 23:
		*cpu_type = "i386/core_2";
		break;
	case 26:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 28:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	/* Assign once at the end so the core_i7 case above isn't clobbered. */
	model = spec;
	return 1;
}

/* in order to get sysfs right */
static int using_nmi;

int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

#ifdef CONFIG_SMP
	register_cpu_notifier(&oprofile_cpu_nb);
#endif
	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	init_sysfs();
	using_nmi = 1;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
#ifdef CONFIG_SMP
		unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
	}
	if (model->exit)
		model->exit();
}