/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suraveee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"perf/amd_iommu: " fmt

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "../perf_event.h"
#include "iommu.h"

#define COUNTER_SHIFT	16
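
/*
 * The IOMMU PC hardware counters are 48 bits wide (see perf_iommu_read());
 * COUNTER_SHIFT is the remaining 64 - 48 = 16 bits, used there to compute
 * wraparound-safe deltas.
 */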

/* iommu pmu conf masks */
#define GET_CSOURCE(x)     ((x)->conf & 0xFFULL)
#define GET_DEVID(x)       (((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)       (((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)       (((x)->conf >> 40) & 0xFFFFFULL)

/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)  ((x)->conf1 & 0xFFFFULL)
#define GET_DOMID_MASK(x)  (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)  (((x)->conf1 >> 32) & 0xFFFFFULL)
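
/*
 * These bit fields mirror the sysfs "format" attributes defined below. As
 * an illustration (hypothetical PMU instance and device IDs), a filtered
 * raw event could be requested from userspace with something like:
 *
 *   perf stat -e 'amd_iommu_0/csource=0x05,devid=0x10,devid_mask=0xffff/' -a sleep 1
 */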

#define IOMMU_NAME_SIZE 16

struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(domid,      "config:24-39");
PMU_FORMAT_ATTR(pasid,      "config:40-59");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(domid_mask, "config1:16-31");
PMU_FORMAT_ATTR(pasid_mask, "config1:32-51");

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};

struct amd_iommu_event_desc {
	struct kobj_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,        "csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,       "csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,           "csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,        "csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,         "csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,   "csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,   "csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,   "csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,   "csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,             "csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,             "csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,       "csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,       "csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,       "csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,             "csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,             "csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,           "csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,       "csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,                 "csource=0x13"),
	AMD_IOMMU_EVENT_DESC(ign_rd_wr_mmio_1ff8h,    "csource=0x14"),
	AMD_IOMMU_EVENT_DESC(vapic_int_non_guest,     "csource=0x15"),
	AMD_IOMMU_EVENT_DESC(vapic_int_guest,         "csource=0x16"),
	AMD_IOMMU_EVENT_DESC(smi_recv,                "csource=0x17"),
	AMD_IOMMU_EVENT_DESC(smi_blk,                 "csource=0x18"),
	{ /* end: all zeroes */ },
};

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/
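
/*
 * cntr_assign_mask tracks one bit per (bank, counter) pair at bit
 * position bank + (bank * 3) + cntr, i.e. bank * 4 + cntr, so each
 * bank owns a 4-bit slice of the mask.
 */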
static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0, shift = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
			shift = bank + (bank * 3) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift)) {
				continue;
			} else {
				piommu->cntr_assign_mask |= BIT_ULL(shift);
				event->hw.iommu_bank = bank;
				event->hw.iommu_cntr = cntr;
				retval = 0;
				goto out;
			}
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);
	return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
				       u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	if ((bank > max_banks) || (cntr > max_cntrs))
		return -EINVAL;

	shift = bank + cntr + (bank * 3);
	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~(1ULL << shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores.
	 * Therefore, it does not support per-process mode.
	 * Also, it does not support event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	/* update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}

static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}
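
/*
 * Program the counter-source and devid/pasid/domid match registers for
 * the event's assigned bank/counter. Each match register takes the mask
 * in its upper 32 bits and the match value in its lower bits; bit 31 is
 * set whenever a non-zero match/mask is programmed (presumably the
 * hardware's match-enable bit).
 */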
static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg = 0ULL;

	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);

	reg = GET_DEVID_MASK(hwc);
	reg = GET_DEVID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID_MASK(hwc);
	reg = GET_PASID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID_MASK(hwc);
	reg = GET_DOMID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count, prev, delta;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	prev = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
		return;

	/*
	 * Handle 48-bit counter overflow:
	 * shifting both values up by COUNTER_SHIFT (64 - 48) lets the
	 * subtraction wrap naturally at 2^64 when the hardware counter
	 * has rolled over; shifting back down yields the true delta.
	 */
	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
	    container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}

static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}

const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};

static const struct pmu iommu_pmu __initconst = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		pr_warn("Error initializing IOMMU %d.\n", idx);
		kfree(perf_iommu);
	}

	return ret;
}

static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/*
	 * An IOMMU PMU is specific to an IOMMU, and can function independently.
	 * So we go through all IOMMUs and ignore the ones that fail init,
	 * unless all IOMMUs fail.
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		ret = init_one_iommu(i);
		if (!ret)
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	return 0;
}

device_initcall(amd_iommu_pc_init);
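
/*
 * Illustrative usage (assuming at least one IOMMU PMU registered as
 * "amd_iommu_0"): the named events defined above can be listed under
 * /sys/bus/event_source/devices/amd_iommu_0/events and counted with,
 * e.g., perf stat -e 'amd_iommu_0/mem_trans_total/' -a sleep 1.
 */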