/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)

struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)

u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);

/*
 * Resync the guest's EL0 PMU event configuration. Called by the host PMU
 * driver when counter rotation on a VHE host, in interrupt context,
 * interrupts a running vcpu; this raises a vcpu request so that the guest
 * configuration is re-applied on the next guest entry, keeping guest (not
 * host) events counted when the counters are oversubscribed.
 */
void kvm_vcpu_pmu_resync_el0(void);
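
/*
 * Illustrative call site in the host PMU driver, under the conditions
 * described above (a sketch only: "interrupted_running_vcpu" is a
 * placeholder for however the driver tracks that state, not a real helper):
 *
 *	if (has_vhe() && in_interrupt() && interrupted_running_vcpu)
 *		kvm_vcpu_pmu_resync_el0();
 */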

#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
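
/*
 * Illustrative use on the vcpu run path (a sketch only; the surrounding
 * steps are placeholders rather than the actual KVM entry sequence):
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	... enter the guest with interrupts still disabled ...
 */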

/*
 * Evaluates as true when emulating PMUv3p5, and false otherwise.
 */
#define kvm_pmu_is_3p5(vcpu) ({						\
	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);		\
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);	\
									\
	pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;				\
})

u8 kvm_arm_pmu_get_pmuver_limit(void);

#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)

static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}

static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
#define kvm_pmu_is_3p5(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

#endif

#endif