#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);
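
	/*
	 * The map lock is a raw spinlock, so the GFP_KERNEL allocation
	 * below must happen with the lock dropped. Another caller may
	 * have inserted the segment while the lock was released, hence
	 * the second lookup pass before the spare allocation is used.
	 */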
lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
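
	/*
	 * The shared register can be claimed when it is unused, or when
	 * every current user has programmed the exact same match/mask
	 * values.
	 */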
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;
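
	/*
	 * Shift both values up so the hardware counter's top bit lands
	 * in bit 63; the subtraction then wraps correctly even when the
	 * narrow counter has rolled over, and shifting back down yields
	 * the unsigned delta.
	 */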
	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP, and is
 * broken for SandyBridge. So we use an hrtimer to periodically poll
 * the counter to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;
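
	/*
	 * The extra shared registers live in a trailing array allocated
	 * directly behind the box structure itself.
	 */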
	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path: run the full constraint scheduling pass */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}

	return ret ? -EINVAL : 0;
}

static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);

	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * Uncore PMUs measure at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
			.module		= THIS_MODULE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id	= setid ? i : -1;
		pmus[i].pmu_idx	= i;
		pmus[i].type	= type;
		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);
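
		/*
		 * One allocation carries the attribute_group followed by
		 * the NULL-terminated array of attribute pointers.
		 */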
		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
					sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	return 0;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
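	/*
	 * For such id entries the expected device/function is encoded in
	 * driver_data above the low 16 bits (and extracted below with
	 * UNCORE_PCI_DEV_DEV/UNCORE_PCI_DEV_FUNC), which is what the
	 * ~0xffff check detects.
	 */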
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}

		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring units with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

static int uncore_cpu_dying(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
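			/* The last dying cpu of a package deactivates the box */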
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
	return 0;
}

static int uncore_cpu_starting(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
	return 0;
}

static int uncore_cpu_prepare(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;
		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return 0;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
	return 0;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void	(*cpu_init)(void);
	int	(*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};
static const struct x86_cpu_id intel_uncore_match [ ] __initconst = {
2016-06-02 17:19:42 -07:00
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_NEHALEM_EP , nhm_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_NEHALEM , nhm_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_WESTMERE , nhm_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_WESTMERE_EP , nhm_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_SANDYBRIDGE , snb_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_IVYBRIDGE , ivb_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_HASWELL_CORE , hsw_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_HASWELL_ULT , hsw_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_HASWELL_GT3E , hsw_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_BROADWELL_CORE , bdw_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_BROADWELL_GT3E , bdw_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_SANDYBRIDGE_X , snbep_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_NEHALEM_EX , nhmex_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_WESTMERE_EX , nhmex_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_IVYBRIDGE_X , ivbep_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_HASWELL_X , hswep_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_BROADWELL_X , bdx_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_BROADWELL_XEON_D , bdx_uncore_init ) ,
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_XEON_PHI_KNL , knl_uncore_init ) ,
2016-10-12 20:27:58 +02:00
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_XEON_PHI_KNM , knl_uncore_init ) ,
2016-06-02 17:19:42 -07:00
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_SKYLAKE_DESKTOP , skl_uncore_init ) ,
2016-06-29 07:01:51 -07:00
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_SKYLAKE_MOBILE , skl_uncore_init ) ,
2016-08-16 16:09:50 -04:00
X86_UNCORE_MODEL_MATCH ( INTEL_FAM6_SKYLAKE_X , skx_uncore_init ) ,
2016-03-20 01:33:36 -07:00
{ } ,
} ;
MODULE_DEVICE_TABLE ( x86cpu , intel_uncore_match ) ;

static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}
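
	/* Fail only if both the msr and the pci side failed to initialize */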
	if (cret && pret)
		return -ENODEV;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 *
	 * The first online cpu of each package allocates and takes
	 * the refcounts for all other online cpus in that package.
	 * If msrs are not enabled no allocation is required and
	 * uncore_cpu_prepare() is not called for each online cpu.
	 */
	if (!cret) {
		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
					"perf/x86/intel/uncore:prepare",
					uncore_cpu_prepare, NULL);
		if (ret)
			goto err;
	} else {
		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
					  "perf/x86/intel/uncore:prepare",
					  uncore_cpu_prepare, NULL);
	}

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
			  "perf/x86/uncore:starting",
			  uncore_cpu_starting, uncore_cpu_dying);

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
			  "perf/x86/uncore:online",
			  uncore_event_cpu_online, uncore_event_cpu_offline);
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);