/*
 * Performance counter x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*generic[X86_PMC_MAX_GENERIC];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)];
	struct perf_counter	*fixed[X86_PMC_MAX_FIXED];
	unsigned long		used_fixed[BITS_TO_LONGS(X86_PMC_MAX_FIXED)];
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

static const int intel_perfmon_event_map[] =
{
	[PERF_COUNT_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}
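
/*
 * Worked example for the 32-bit delta clipping above, with hypothetical
 * register values: if the low word of the counter wraps from 0xfffffff0
 * to 0x00000010 between two updates, then
 * (s32)0x00000010 - (s32)0xfffffff0 = 32, so delta == 0x20 events are
 * accumulated, even though a plain 64-bit subtraction would wrap.
 */
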
/*
* Setup the hardware configuration for a given hw_event_type
*/
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Count user events, and generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * If privileged enough, count OS events too, and allow
	 * NMI events as well:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN)) {
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
		if (hw_event->nmi)
			hwc->nmi = 1;
	}

	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;

	hwc->irq_period		= hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
		hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event types provide the config directly in the event structure:
	 */
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
	} else {
		if (hw_event->type >= max_intel_perfmon_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= intel_perfmon_event_map[hw_event->type];
	}
	counter->wakeup_pending = 0;

	return 0;
}
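
/*
 * Example with hypothetical values: a non-raw hw_event of type
 * PERF_COUNT_INSTRUCTIONS maps to 0x00c0 via intel_perfmon_event_map[],
 * so an unprivileged caller ends up with
 * hwc->config == ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT | 0x00c0,
 * and a CAP_SYS_ADMIN caller additionally gets ARCH_PERFMON_EVENTSEL_OS.
 */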

void hw_perf_enable_all(void)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}

u64 hw_perf_save_disable(void)
{
	u64 ctrl;

	if (unlikely(!perf_counters_initialized))
		return 0;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);

	return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);
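
/*
 * Typical pairing of the two helpers above (usage sketch, assuming the
 * counters have been initialized):
 *
 *	u64 ctrl = hw_perf_save_disable();
 *	... counters are globally disabled here ...
 *	hw_perf_restore(ctrl);
 */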

static inline void
__pmc_generic_disable(struct perf_counter *counter,
		      struct hw_perf_counter *hwc, unsigned int idx)
{
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s32 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)(s64)-left);

	wrmsr(hwc->counter_base + idx, -left, 0);
}
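
/*
 * Example with hypothetical numbers: for left == 0x7FFFFFFF the counter
 * MSR is programmed with -left, so the hardware counts up for 'left'
 * events before it overflows and raises the next PMI. prev_count is
 * primed with the same -left value, so a later x86_perf_counter_update()
 * accounts exactly the events elapsed since this reprogramming.
 */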

static void
__pmc_generic_enable(struct perf_counter *counter,
		     struct hw_perf_counter *hwc, int idx)
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static void pmc_generic_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Try to get the previous counter again */
	if (test_and_set_bit(idx, cpuc->used)) {
		idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
		set_bit(idx, cpuc->used);
		hwc->idx = idx;
	}

	perf_counters_lapic_init(hwc->nmi);

	__pmc_generic_disable(counter, hwc, idx);

	cpuc->generic[idx] = counter;

	__hw_perf_counter_set_period(counter, hwc, idx);
	__pmc_generic_enable(counter, hwc, idx);
}
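
/*
 * Slot allocation sketch: each CPU tracks busy PMCs in cpuc->used. If,
 * hypothetically, hwc->idx is already taken, the find_first_zero_bit()
 * fallback above picks the lowest free generic counter and caches it in
 * hwc->idx for the next scheduling of this counter.
 */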

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
	int cpu, idx;

	if (!nr_hw_counters)
		return;

	local_irq_disable();

	cpu = smp_processor_id();

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);

	for (idx = 0; idx < nr_hw_counters; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		printk(KERN_INFO "CPU#%d: PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}

	local_irq_enable();
}

static void pmc_generic_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__pmc_generic_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->generic[idx] = NULL;

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *)&irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	u64 pmc_ctrl;

	rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);

	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);

	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
		__pmc_generic_enable(counter, hwc, idx);
}

static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
	struct perf_counter *counter, *group_leader = sibling->group_leader;

	/*
	 * Store sibling timestamps (if any):
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
		perf_store_irq_data(sibling, counter->hw_event.type);
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
	}
}
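
/*
 * Resulting irqdata layout for a PERF_RECORD_GROUP sample (sketch): for
 * every sibling counter two u64 words are appended via
 * perf_store_irq_data() - first the sibling's hw_event.type, then its
 * current 64-bit count.
 */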

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status, saved_global;
	struct cpu_hw_counters *cpuc;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);

	/* Disable counters globally */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
	ack_APIC_irq();

	cpuc = &per_cpu(cpu_hw_counters, cpu);

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

again:
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, nr_hw_counters) {
		struct perf_counter *counter = cpuc->generic[bit];

		clear_bit(bit, (unsigned long *)&status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);

		switch (counter->hw_event.record_type) {
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
		 * do a task wakeup - but we mark these counters as
		 * wakeup_pending and initiate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off:
	 */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	inc_irq_stat(apic_perf_irqs);
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);

	irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for_each_bit(bit, cpuc->used, nr_hw_counters) {
		struct perf_counter *counter = cpuc->generic[bit];

		if (!counter)
			continue;

		if (counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}

void __cpuinit perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");
	printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
	printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);

	nr_hw_counters = eax.split.num_counters;
	if (nr_hw_counters > X86_PMC_MAX_GENERIC) {
		nr_hw_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_hw_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << nr_hw_counters) - 1;
	perf_max_counters = nr_hw_counters;

	printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
	printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);

	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}
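
/*
 * Example with hypothetical CPUID data: a CPU reporting
 * eax.split.num_counters == 4 yields nr_hw_counters == 4 and
 * perf_counter_mask == (1 << 4) - 1 == 0xf, i.e. one enable bit per
 * generic counter in MSR_CORE_PERF_GLOBAL_CTRL.
 */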

static void pmc_generic_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
	.hw_perf_counter_enable		= pmc_generic_enable,
	.hw_perf_counter_disable	= pmc_generic_disable,
	.hw_perf_counter_read		= pmc_generic_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}