Merge branch kvm-arm64/6.6/pmu-fixes into kvmarm-master/next

* kvm-arm64/6.6/pmu-fixes:
  : .
  : Another set of PMU fixes, courtesy of Reiji Watanabe.
  : From the cover letter:
  :
  : "This series fixes a couple of PMUver related handling of
  : vPMU support.
  :
  : On systems where the PMUVer is not uniform across all PEs,
  : KVM currently does not advertise PMUv3 to the guest,
  : even if userspace successfully runs KVM_ARM_VCPU_INIT with
  : KVM_ARM_VCPU_PMU_V3."
  :
  : Additionally, a fix for an obscure counter oversubscription
  : issue happening when the host profiles the guest's EL0 (a
  : userspace sketch of the KVM_ARM_VCPU_INIT flow is included
  : after the commit metadata below).
  : .
  KVM: arm64: pmu: Guard PMU emulation definitions with CONFIG_KVM
  KVM: arm64: pmu: Resync EL0 state on counter rotation
  KVM: arm64: PMU: Don't advertise STALL_SLOT_{FRONTEND,BACKEND}
  KVM: arm64: PMU: Don't advertise the STALL_SLOT event
  KVM: arm64: PMU: Avoid inappropriate use of host's PMUVer
  KVM: arm64: PMU: Disallow vPMU on non-uniform PMUVer

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 50a40ff7d3
Author: Marc Zyngier <maz@kernel.org>
Date:   2023-08-28 09:29:11 +01:00

7 files changed, 55 insertions(+), 12 deletions(-)
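As background for the cover letter above, this is how a VMM opts a vCPU
in to PMUv3 through the standard KVM ABI. A minimal sketch, not part of
the series: the helper name vcpu_init_with_pmu is hypothetical, vm_fd
and vcpu_fd are assumed to come from KVM_CREATE_VM/KVM_CREATE_VCPU, and
error handling is reduced to return codes.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: request PMUv3 at vCPU init time. With this
 * series applied, the init is expected to fail on systems where only
 * a subset of PEs implements PMUv3. */
static int vcpu_init_with_pmu(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	/* Let KVM pick the preferred CPU target for this host */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	/* Opt in to the vPMU on top of the preferred target */
	init.features[0] |= 1U << KVM_ARM_VCPU_PMU_V3;

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}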

--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h

@@ -227,6 +227,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
 	return false;
 }
 
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
 /* PMU Version in DFR Register */
 #define ARMV8_PMU_DFR_VER_NI		0
 #define ARMV8_PMU_DFR_VER_V3P4		0x5

--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h

@@ -49,6 +49,7 @@
 #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 #define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
 #define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)
 
 #define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
 					 KVM_DIRTY_LOG_INITIALLY_SET)

--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c

@@ -804,6 +804,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 			kvm_pmu_handle_pmcr(vcpu,
 					    __vcpu_sys_reg(vcpu, PMCR_EL0));
 
+		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
+			kvm_vcpu_pmu_restore_guest(vcpu);
+
 		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
 			return kvm_vcpu_suspend(vcpu);

--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c

@@ -14,6 +14,7 @@
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
 #include <kvm/arm_vgic.h>
+#include <asm/arm_pmuv3.h>
 
 #define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)
@@ -35,12 +36,8 @@ static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
 	return &vcpu->arch.pmu.pmc[cnt_idx];
 }
 
-static u32 kvm_pmu_event_mask(struct kvm *kvm)
+static u32 __kvm_pmu_event_mask(unsigned int pmuver)
 {
-	unsigned int pmuver;
-
-	pmuver = kvm->arch.arm_pmu->pmuver;
-
 	switch (pmuver) {
 	case ID_AA64DFR0_EL1_PMUVer_IMP:
 		return GENMASK(9, 0);
@@ -55,6 +52,14 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	}
 }
 
+static u32 kvm_pmu_event_mask(struct kvm *kvm)
+{
+	u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
+	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
+
+	return __kvm_pmu_event_mask(pmuver);
+}
+
 /**
  * kvm_pmc_is_64bit - determine if counter is 64bit
  * @pmc: counter context
@@ -672,8 +677,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 {
 	struct arm_pmu_entry *entry;
 
-	if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-	    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+	/*
+	 * Check the sanitised PMU version for the system, as KVM does not
+	 * support implementations where PMUv3 exists on a subset of CPUs.
+	 */
+	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
 		return;
 
 	mutex_lock(&arm_pmus_lock);
@@ -750,11 +758,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	} else {
 		val = read_sysreg(pmceid1_el0);
 		/*
-		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
+		 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
-			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
+		val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
+			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
+			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
 		base = 32;
 	}
@@ -950,11 +959,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		return 0;
 	}
 	case KVM_ARM_VCPU_PMU_V3_FILTER: {
+		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
 		struct kvm_pmu_event_filter __user *uaddr;
 		struct kvm_pmu_event_filter filter;
 		int nr_events;
 
-		nr_events = kvm_pmu_event_mask(kvm) + 1;
+		/*
+		 * Allow userspace to specify an event filter for the entire
+		 * event range supported by the PMUVer of the hardware, rather
+		 * than the guest's PMUVer for KVM backward compatibility.
+		 */
+		nr_events = __kvm_pmu_event_mask(pmuver) + 1;
 
 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
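For reference, the filter whose size the hunk above derives from
__kvm_pmu_event_mask() is installed by userspace via the documented
KVM_ARM_VCPU_PMU_V3_CTRL device attribute group. A minimal sketch, not
part of the series: the helper name set_pmu_event_filter is
hypothetical, and vcpu_fd is assumed to be a vCPU already initialised
with KVM_ARM_VCPU_PMU_V3.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: allow only event numbers 0x0-0x3f; registering
 * an ALLOW range first makes all other events denied by default. */
static int set_pmu_event_filter(int vcpu_fd)
{
	struct kvm_pmu_event_filter filter = {
		.base_event	= 0x0,
		.nevents	= 0x40,
		.action		= KVM_PMU_EVENT_ALLOW,
	};
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_FILTER,
		.addr	= (__u64)(unsigned long)&filter,
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}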

--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c

@@ -236,3 +236,21 @@ bool kvm_set_pmuserenr(u64 val)
 	ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
 	return true;
 }
+
+/*
+ * If we interrupted the guest to update the host PMU context, make
+ * sure we re-apply the guest EL0 state.
+ */
+void kvm_vcpu_pmu_resync_el0(void)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (!has_vhe() || !in_interrupt())
+		return;
+
+	vcpu = kvm_get_running_vcpu();
+	if (!vcpu)
+		return;
+
+	kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
+}

--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c

@@ -772,6 +772,8 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 	/* Enable all counters */
 	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+
+	kvm_vcpu_pmu_resync_el0();
 }
 
 static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)

--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h

@@ -12,7 +12,7 @@
 
 #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
 
-#ifdef CONFIG_HW_PERF_EVENTS
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
 
 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
@@ -74,6 +74,7 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
 struct kvm_pmu_events *kvm_get_pmu_events(void);
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
 
 #define kvm_vcpu_has_pmu(vcpu)				\
 	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
@@ -171,6 +172,7 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
 	return 0;
 }
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
 #endif