#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>
/*
 * Software event IDs for the MSR PMU.  Each ID maps to one MSR in the
 * msr[] table below; availability is probed at init time (msr_init()).
 */
enum perf_msr_id {
	PERF_MSR_TSC		= 0,	/* time stamp counter (no MSR read) */
	PERF_MSR_APERF		= 1,	/* MSR_IA32_APERF */
	PERF_MSR_MPERF		= 2,	/* MSR_IA32_MPERF */
	PERF_MSR_PPERF		= 3,	/* MSR_PPERF (Intel SKL+) */
	PERF_MSR_SMI		= 4,	/* MSR_SMI_COUNT (Intel) */
	PERF_MSR_PTSC		= 5,	/* MSR_F15H_PTSC (AMD) */
	PERF_MSR_IRPERF		= 6,	/* MSR_F17H_IRPERF (AMD) */

	PERF_MSR_EVENT_MAX,		/* number of event IDs; also config bound */
};
2015-09-24 04:48:53 -07:00
static bool test_aperfmperf ( int idx )
2015-08-06 17:26:58 +02:00
{
return boot_cpu_has ( X86_FEATURE_APERFMPERF ) ;
}
2016-01-29 16:29:56 +08:00
static bool test_ptsc ( int idx )
{
return boot_cpu_has ( X86_FEATURE_PTSC ) ;
}
2016-01-29 16:29:57 +08:00
static bool test_irperf ( int idx )
{
return boot_cpu_has ( X86_FEATURE_IRPERF ) ;
}
/*
 * MSR_SMI_COUNT (and, on newer parts, MSR_PPERF) are Intel model-specific
 * and not enumerated by CPUID, so gate those event IDs on an explicit
 * whitelist of known family-6 models.
 */
static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:

	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:

	case INTEL_FAM6_ATOM_SILVERMONT1:
	case INTEL_FAM6_ATOM_SILVERMONT2:
	case INTEL_FAM6_ATOM_AIRMONT:
		/* These models expose SMI_COUNT but not PPERF. */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		/* Skylake and Kaby Lake additionally expose PPERF. */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	/* Unknown model, or event not supported on this model. */
	return false;
}
/*
 * One probed MSR event: the MSR address, its sysfs "events" attribute,
 * and an optional availability test run at init time.
 */
struct perf_msr {
	u64 msr;			/* MSR address; 0 means read the TSC instead */
	struct perf_pmu_events_attr *attr;	/* sysfs entry; cleared if probe fails */
	bool (*test)(int idx);		/* availability probe; NULL => always present */
};
/* sysfs event strings: the "event=0xNN" config value for each event ID. */
PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
/*
 * Event table, indexed by perf_msr_id.  Entries whose test() fails (or
 * whose MSR cannot be read) get their ->attr cleared in msr_init() and
 * are rejected by msr_event_init().
 */
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,	NULL,		 },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,	test_intel,	 },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,	test_ptsc,	 },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf,	test_irperf,	 },
};
/*
 * sysfs "events" attributes; filled in by msr_init() with only the
 * events that survived probing, hence the +1 for the NULL terminator.
 */
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

/* The whole 64-bit config field selects the event ID. */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
static int msr_event_init ( struct perf_event * event )
{
u64 cfg = event - > attr . config ;
if ( event - > attr . type ! = event - > pmu - > type )
return - ENOENT ;
if ( cfg > = PERF_MSR_EVENT_MAX )
return - EINVAL ;
/* unsupported modes and filters */
if ( event - > attr . exclude_user | |
event - > attr . exclude_kernel | |
event - > attr . exclude_hv | |
event - > attr . exclude_idle | |
event - > attr . exclude_host | |
event - > attr . exclude_guest | |
event - > attr . sample_period ) /* no sampling */
return - EINVAL ;
2015-08-06 17:26:58 +02:00
if ( ! msr [ cfg ] . attr )
return - EINVAL ;
2015-07-20 11:49:06 -04:00
event - > hw . idx = - 1 ;
event - > hw . event_base = msr [ cfg ] . msr ;
event - > hw . config = cfg ;
return 0 ;
}
/*
 * Read the event's raw counter: the programmed MSR, or the TSC for
 * PERF_MSR_TSC (whose table entry has event_base == 0).
 */
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		rdtscll(now);

	return now;
}
/*
 * Fold the counter movement since the last snapshot into event->count.
 * The cmpxchg retry loop makes this safe against a concurrent NMI-context
 * update of prev_count.
 */
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	/* MSR_SMI_COUNT is only 32 bits wide; sign-extend the wrap-around delta. */
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}
static void msr_event_start ( struct perf_event * event , int flags )
{
u64 now ;
now = msr_read_counter ( event ) ;
local64_set ( & event - > hw . prev_count , now ) ;
}
static void msr_event_stop ( struct perf_event * event , int flags )
{
msr_event_update ( event ) ;
}
/* Removing an event just requires a final count update (PERF_EF_UPDATE). */
static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}
/* Adding an event cannot fail; optionally start counting right away. */
static int msr_event_add(struct perf_event *event, int flags)
{
	if (!(flags & PERF_EF_START))
		return 0;

	msr_event_start(event, flags);
	return 0;
}
/*
 * PMU descriptor: a software-context, count-only PMU (no interrupts,
 * hence no sampling support).
 */
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,

	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};
/*
 * Probe which MSR events exist on this CPU, populate the sysfs event
 * list accordingly, and register the PMU.  Returns 0 also when the PMU
 * is simply not applicable (no TSC); returns a negative errno only on
 * registration failure.
 */
static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	/*
	 * Propagate registration failure instead of silently discarding it
	 * and reporting success with no PMU registered.
	 */
	return perf_pmu_register(&pmu_msr, "msr", -1);
}
device_initcall(msr_init);