Merge branch kvm-arm64/configurable-id-regs into kvmarm/next
* kvm-arm64/configurable-id-regs:
  : Configurable ID register infrastructure, courtesy of Jing Zhang
  :
  : Create generalized infrastructure for allowing userspace to select the
  : supported feature set for a VM, so long as the feature set is a subset
  : of what hardware + KVM allows. This does not add any new features that
  : are user-configurable, and instead focuses on the necessary refactoring
  : to enable future work.
  :
  : As a consequence of the series, feature asymmetry is now deliberately
  : disallowed for KVM. It is unlikely that VMMs ever configured VMs with
  : asymmetry, nor does it align with the kernel's overall stance that
  : features must be uniform across all cores in the system.
  :
  : Furthermore, KVM incorrectly advertised an IMP_DEF PMU to guests for
  : some time. Migrations from affected kernels were supported by explicitly
  : allowing such an ID register value from userspace, and forwarding that
  : along to the guest. KVM now allows an IMP_DEF PMU version to be restored
  : through the ID register interface, but reinterprets the user value as
  : not implemented (0).
  KVM: arm64: Rip out the vestiges of the 'old' ID register scheme
  KVM: arm64: Handle ID register reads using the VM-wide values
  KVM: arm64: Use generic sanitisation for ID_AA64PFR0_EL1
  KVM: arm64: Use generic sanitisation for ID_(AA64)DFR0_EL1
  KVM: arm64: Use arm64_ftr_bits to sanitise ID register writes
  KVM: arm64: Save ID registers' sanitized value per guest
  KVM: arm64: Reuse fields of sys_reg_desc for idreg
  KVM: arm64: Rewrite IMPDEF PMU version as NI
  KVM: arm64: Make vCPU feature flags consistent VM-wide
  KVM: arm64: Relax invariance of KVM_ARM_VCPU_POWER_OFF
  KVM: arm64: Separate out feature sanitisation and initialisation

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 89a734b54c
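For context, here is a minimal, hypothetical userspace sketch (not part of this commit) of the interface the series enables: read an ID register with KVM_GET_ONE_REG, narrow a feature field, and write it back with KVM_SET_ONE_REG before the first KVM_RUN. The helper name is an assumption for illustration; ARM64_SYS_REG() and struct kvm_one_reg come from the KVM UAPI headers, and a write that promises more than hardware + KVM allow is rejected with -EINVAL.

/*
 * Hypothetical VMM-side sketch: clamp ID_AA64DFR0_EL1.PMUVer
 * (bits [11:8]) for a vCPU before it first runs.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64DFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=5, op2=0. */
#define REG_ID_AA64DFR0_EL1 ARM64_SYS_REG(3, 0, 0, 5, 0)

static int clamp_pmuver(int vcpu_fd, uint64_t pmuver)
{
        uint64_t val;
        struct kvm_one_reg reg = {
                .id   = REG_ID_AA64DFR0_EL1,
                .addr = (uint64_t)&val,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                return -1;

        val &= ~(0xfULL << 8);          /* clear PMUVer */
        val |= (pmuver & 0xf) << 8;     /* request the desired version */

        /* Fails with -EINVAL if the value exceeds the hardware + KVM limit. */
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}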
arch/arm64/include/asm/cpufeature.h
@@ -918,6 +918,7 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
        return 8;
}

s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);

extern struct arm64_ftr_override id_aa64mmfr1_override;
arch/arm64/include/asm/kvm_emulate.h
@@ -62,12 +62,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
                               &kvm->arch.flags));

        return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
        return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
}
#endif
arch/arm64/include/asm/kvm_host.h
@@ -39,6 +39,7 @@
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)

#define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
@@ -229,25 +230,23 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_MTE_ENABLED 1
        /* At least one vCPU has ran in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
        /*
         * The following two bits are used to indicate the guest's EL1
         * register width configuration. A value of KVM_ARCH_FLAG_EL1_32BIT
         * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
         * Otherwise, the guest's EL1 register width has not yet been
         * determined yet.
         */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED 3
#define KVM_ARCH_FLAG_EL1_32BIT 4
        /* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
        /* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
        /* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 6
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
        /* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 7
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
        /* SMCCC filter initialized for the VM */
#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 8
#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 7
        /* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 8
        unsigned long flags;

        /* VM-wide vCPU feature set */
        DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

        /*
         * VM-wide PMU filter, implemented as a bitmap and big enough for
         * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
@@ -257,17 +256,23 @@ struct kvm_arch {

        cpumask_var_t supported_cpus;

        u8 pfr0_csv2;
        u8 pfr0_csv3;
        struct {
                u8 imp:4;
                u8 unimp:4;
        } dfr0_pmuver;

        /* Hypercall features firmware registers' descriptor */
        struct kvm_smccc_features smccc_feat;
        struct maple_tree smccc_filter;

        /*
         * Emulated CPU ID registers per VM
         * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
         * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
         *
         * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
         * Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
         */
#define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define IDREG(kvm, id) ((kvm)->arch.id_regs[IDREG_IDX(id)])
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
        u64 id_regs[KVM_ARM_ID_REG_NUM];

        /*
         * For an untrusted host VM, 'pkvm.handle' is used to lookup
         * the associated pKVM instance in the hypervisor.
arch/arm64/kernel/cpufeature.c
@@ -800,7 +800,7 @@ static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
        return reg;
}

static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
                                s64 cur)
{
        s64 ret = 0;
arch/arm64/kvm/arm.c
@@ -121,22 +121,6 @@ static int kvm_arm_default_max_vcpus(void)
        return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

static void set_default_spectre(struct kvm *kvm)
{
        /*
         * The default is to expose CSV2 == 1 if the HW isn't affected.
         * Although this is a per-CPU feature, we make it global because
         * asymmetric systems are just a nuisance.
         *
         * Userspace can override this as long as it doesn't promise
         * the impossible.
         */
        if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
                kvm->arch.pfr0_csv2 = 1;
        if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
                kvm->arch.pfr0_csv3 = 1;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm: pointer to the KVM struct
@@ -180,14 +164,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->max_vcpus = kvm_arm_default_max_vcpus();

        set_default_spectre(kvm);
        kvm_arm_init_hypercalls(kvm);

        /*
         * Initialise the default PMUver before there is a chance to
         * create an actual PMU.
         */
        kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
        bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);

        return 0;
@@ -1195,58 +1174,115 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
        return -EINVAL;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                               const struct kvm_vcpu_init *init)
static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
                                        const struct kvm_vcpu_init *init)
{
        unsigned int i, ret;
        u32 phys_target = kvm_target_cpu();
        unsigned long features = init->features[0];
        int i;

        if (init->target != phys_target)
                return -EINVAL;
        if (features & ~KVM_VCPU_VALID_FEATURES)
                return -ENOENT;

        /*
         * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
         * use the same target.
         */
        if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
                return -EINVAL;

        /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
        for (i = 0; i < sizeof(init->features) * 8; i++) {
                bool set = (init->features[i / 32] & (1 << (i % 32)));

                if (set && i >= KVM_VCPU_MAX_FEATURES)
        for (i = 1; i < ARRAY_SIZE(init->features); i++) {
                if (init->features[i])
                        return -ENOENT;

                /*
                 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
                 * use the same feature set.
                 */
                if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
                    test_bit(i, vcpu->arch.features) != set)
                        return -EINVAL;

                if (set)
                        set_bit(i, vcpu->arch.features);
        }

        vcpu->arch.target = phys_target;
        if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
                return 0;

        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
                return -EINVAL;

        /* MTE is incompatible with AArch32 */
        if (kvm_has_mte(vcpu->kvm))
                return -EINVAL;

        /* NV is incompatible with AArch32 */
        if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
                return -EINVAL;

        return 0;
}

static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
                                  const struct kvm_vcpu_init *init)
{
        unsigned long features = init->features[0];

        return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
               vcpu->arch.target != init->target;
}

static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                                 const struct kvm_vcpu_init *init)
{
        unsigned long features = init->features[0];
        struct kvm *kvm = vcpu->kvm;
        int ret = -EINVAL;

        mutex_lock(&kvm->arch.config_lock);

        if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
            !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
                goto out_unlock;

        vcpu->arch.target = init->target;
        bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);

        /* Now we know what it is, we can reset it. */
        ret = kvm_reset_vcpu(vcpu);
        if (ret) {
                vcpu->arch.target = -1;
                bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
                goto out_unlock;
        }

        bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
        set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);

out_unlock:
        mutex_unlock(&kvm->arch.config_lock);
        return ret;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                               const struct kvm_vcpu_init *init)
{
        int ret;

        if (init->target != kvm_target_cpu())
                return -EINVAL;

        ret = kvm_vcpu_init_check_features(vcpu, init);
        if (ret)
                return ret;

        if (vcpu->arch.target == -1)
                return __kvm_vcpu_set_target(vcpu, init);

        if (kvm_vcpu_init_changed(vcpu, init))
                return -EINVAL;

        return kvm_reset_vcpu(vcpu);
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
                                         struct kvm_vcpu_init *init)
{
        bool power_off = false;
        int ret;

        /*
         * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
         * reflecting it in the finalized feature set, thus limiting its scope
         * to a single KVM_ARM_VCPU_INIT call.
         */
        if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
                init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
                power_off = true;
        }

        ret = kvm_vcpu_set_target(vcpu, init);
        if (ret)
                return ret;
@@ -1275,7 +1311,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
         */
        spin_lock(&vcpu->arch.mp_state_lock);

        if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
        if (power_off)
                __kvm_arm_vcpu_power_off(vcpu);
        else
                WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
arch/arm64/kvm/reset.c
@@ -186,57 +186,6 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
        return 0;
}

/**
 * kvm_set_vm_width() - set the register width for the guest
 * @vcpu: Pointer to the vcpu being configured
 *
 * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
 * in the VM flags based on the vcpu's requested register width, the HW
 * capabilities and other options (such as MTE).
 * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
 * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        bool is32bit;

        is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);

        lockdep_assert_held(&kvm->arch.config_lock);

        if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
                /*
                 * The guest's register width is already configured.
                 * Make sure that the vcpu is consistent with it.
                 */
                if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
                        return 0;

                return -EINVAL;
        }

        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
                return -EINVAL;

        /* MTE is incompatible with AArch32 */
        if (kvm_has_mte(kvm) && is32bit)
                return -EINVAL;

        /* NV is incompatible with AArch32 */
        if (vcpu_has_nv(vcpu) && is32bit)
                return -EINVAL;

        if (is32bit)
                set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);

        set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);

        return 0;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
@@ -262,13 +211,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        bool loaded;
        u32 pstate;

        mutex_lock(&vcpu->kvm->arch.config_lock);
        ret = kvm_set_vm_width(vcpu);
        mutex_unlock(&vcpu->kvm->arch.config_lock);

        if (ret)
                return ret;

        spin_lock(&vcpu->arch.mp_state_lock);
        reset_state = vcpu->arch.reset_state;
        vcpu->arch.reset_state.reset = false;
arch/arm64/kvm/sys_regs.c
@@ -42,6 +42,8 @@
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 val);

static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
@@ -540,10 +542,11 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
static u64 reset_bvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
        return rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
@@ -576,10 +579,11 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
static u64 reset_bcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
        return rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
@@ -613,10 +617,11 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
static u64 reset_wvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
        return rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
@@ -649,25 +654,28 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
static u64 reset_wcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
        return rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 amair = read_sysreg(amair_el1);
        vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
        return amair;
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 actlr = read_sysreg(actlr_el1);
        vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
        return actlr;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 mpidr;
@@ -681,7 +689,10 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
        mpidr |= (1ULL << 31);
        vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

        return mpidr;
}

static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
@@ -693,13 +704,13 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
        return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

        /* No PMU available, any PMU reg may UNDEF... */
        if (!kvm_arm_support_pmu_v3())
                return;
                return 0;

        n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
        n &= ARMV8_PMU_PMCR_N_MASK;
@@ -708,33 +719,41 @@ static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)

        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= mask;

        return __vcpu_sys_reg(vcpu, r->reg);
}

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);

        return __vcpu_sys_reg(vcpu, r->reg);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;

        return __vcpu_sys_reg(vcpu, r->reg);
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;

        return __vcpu_sys_reg(vcpu, r->reg);
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 pmcr;

        /* No PMU available, PMCR_EL0 may UNDEF... */
        if (!kvm_arm_support_pmu_v3())
                return;
                return 0;

        /* Only preserve PMCR_EL0.N, and reset the rest to 0 */
        pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
@@ -742,6 +761,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
        pmcr |= ARMV8_PMU_PMCR_LC;

        __vcpu_sys_reg(vcpu, r->reg) = pmcr;

        return __vcpu_sys_reg(vcpu, r->reg);
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -1174,25 +1195,89 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
        return true;
}

static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
                                    s64 new, s64 cur)
{
        if (kvm_vcpu_has_pmu(vcpu))
                return vcpu->kvm->arch.dfr0_pmuver.imp;
        struct arm64_ftr_bits kvm_ftr = *ftrp;

        return vcpu->kvm->arch.dfr0_pmuver.unimp;
        /* Some features have different safe value type in KVM than host features */
        switch (id) {
        case SYS_ID_AA64DFR0_EL1:
                if (kvm_ftr.shift == ID_AA64DFR0_EL1_PMUVer_SHIFT)
                        kvm_ftr.type = FTR_LOWER_SAFE;
                break;
        case SYS_ID_DFR0_EL1:
                if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
                        kvm_ftr.type = FTR_LOWER_SAFE;
                break;
        }

        return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}

static u8 perfmon_to_pmuver(u8 perfmon)
/**
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against idreg's KVM sanitised limit return from reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not in
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *rd,
                                u64 val)
{
        switch (perfmon) {
        case ID_DFR0_EL1_PerfMon_PMUv3:
                return ID_AA64DFR0_EL1_PMUVer_IMP;
        case ID_DFR0_EL1_PerfMon_IMPDEF:
                return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
        default:
                /* Anything ARMv8.1+ and NI have the same value. For now. */
                return perfmon;
        const struct arm64_ftr_reg *ftr_reg;
        const struct arm64_ftr_bits *ftrp = NULL;
        u32 id = reg_to_encoding(rd);
        u64 writable_mask = rd->val;
        u64 limit = rd->reset(vcpu, rd);
        u64 mask = 0;

        /*
         * Hidden and unallocated ID registers may not have a corresponding
         * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
         * only safe value is 0.
         */
        if (sysreg_visible_as_raz(vcpu, rd))
                return val ? -E2BIG : 0;

        ftr_reg = get_arm64_ftr_reg(id);
        if (!ftr_reg)
                return -EINVAL;

        ftrp = ftr_reg->ftr_bits;

        for (; ftrp && ftrp->width; ftrp++) {
                s64 f_val, f_lim, safe_val;
                u64 ftr_mask;

                ftr_mask = arm64_ftr_mask(ftrp);
                if ((ftr_mask & writable_mask) != ftr_mask)
                        continue;

                f_val = arm64_ftr_value(ftrp, val);
                f_lim = arm64_ftr_value(ftrp, limit);
                mask |= ftr_mask;

                if (f_val == f_lim)
                        safe_val = f_val;
                else
                        safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

                if (safe_val != f_val)
                        return -E2BIG;
        }

        /* For fields that are not writable, values in limit are the safe values. */
        if ((val & ~mask) != (limit & ~mask))
                return -E2BIG;

        return 0;
}

static u8 pmuver_to_perfmon(u8 pmuver)
@@ -1209,7 +1294,8 @@ static u8 pmuver_to_perfmon(u8 pmuver)
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                                       const struct sys_reg_desc *r)
{
        u32 id = reg_to_encoding(r);
        u64 val;
@@ -1220,19 +1306,6 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
        val = read_sanitised_ftr_reg(id);

        switch (id) {
        case SYS_ID_AA64PFR0_EL1:
                if (!vcpu_has_sve(vcpu))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
                if (kvm_vgic_global_state.type == VGIC_V3) {
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
                        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
                }
                break;
        case SYS_ID_AA64PFR1_EL1:
                if (!kvm_has_mte(vcpu->kvm))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
@@ -1253,22 +1326,6 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
                if (!cpus_have_final_cap(ARM64_HAS_WFXT))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
                break;
        case SYS_ID_AA64DFR0_EL1:
                /* Limit debug to ARMv8.0 */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
                /* Set PMUver to the required version */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
                                  vcpu_pmuver(vcpu));
                /* Hide SPE from guests */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
                break;
        case SYS_ID_DFR0_EL1:
                val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
                                  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
                break;
        case SYS_ID_AA64MMFR2_EL1:
                val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
                break;
@@ -1280,6 +1337,28 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
        return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
                                     const struct sys_reg_desc *r)
{
        return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        return IDREG(vcpu->kvm, reg_to_encoding(r));
}

/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
 */
static inline bool is_id_reg(u32 id)
{
        return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
                sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
                sys_reg_CRm(id) < 8);
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
                                  const struct sys_reg_desc *r)
{
@@ -1341,88 +1420,113 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
        return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
                               const struct sys_reg_desc *rd,
                               u64 val)
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
                                          const struct sys_reg_desc *rd)
{
        u8 csv2, csv3;
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

        if (!vcpu_has_sve(vcpu))
                val &= ~ID_AA64PFR0_EL1_SVE_MASK;

        /*
         * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
         * it doesn't promise more than what is actually provided (the
         * guest could otherwise be covered in ectoplasmic residue).
         * The default is to expose CSV2 == 1 if the HW isn't affected.
         * Although this is a per-CPU feature, we make it global because
         * asymmetric systems are just a nuisance.
         *
         * Userspace can override this as long as it doesn't promise
         * the impossible.
         */
        csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
        if (csv2 > 1 ||
            (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
                return -EINVAL;
        if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
                val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
                val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
        }
        if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
                val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
                val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
        }

        /* Same thing for CSV3 */
        csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
        if (csv3 > 1 ||
            (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
                return -EINVAL;
        if (kvm_vgic_global_state.type == VGIC_V3) {
                val &= ~ID_AA64PFR0_EL1_GIC_MASK;
                val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
        }

        /* We can only differ with CSV[23], and anything else is an error */
        val ^= read_id_reg(vcpu, rd);
        val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
                 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
        if (val)
                return -EINVAL;
        val &= ~ID_AA64PFR0_EL1_AMU_MASK;

        vcpu->kvm->arch.pfr0_csv2 = csv2;
        vcpu->kvm->arch.pfr0_csv3 = csv3;
        return val;
}

        return 0;
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
                                          const struct sys_reg_desc *rd)
{
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

        /* Limit debug to ARMv8.0 */
        val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
        val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);

        /*
         * Only initialize the PMU version if the vCPU was configured with one.
         */
        val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
        if (kvm_vcpu_has_pmu(vcpu))
                val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
                                      kvm_arm_pmu_get_pmuver_limit());

        /* Hide SPE from guests */
        val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

        return val;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
                               const struct sys_reg_desc *rd,
                               u64 val)
{
        u8 pmuver, host_pmuver;
        bool valid_pmu;

        host_pmuver = kvm_arm_pmu_get_pmuver_limit();
        u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

        /*
         * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
         * as it doesn't promise more than what the HW gives us. We
         * allow an IMPDEF PMU though, only if no PMU is supported
         * (KVM backward compatibility handling).
         * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
         * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
         * exposed an IMP_DEF PMU to userspace and the guest on systems w/
         * non-architectural PMUs. Of course, PMUv3 is the only game in town for
         * PMU virtualization, so the IMP_DEF value was rather user-hostile.
         *
         * At minimum, we're on the hook to allow values that were given to
         * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
         * with a more sensible NI. The value of an ID register changing under
         * the nose of the guest is unfortunate, but is certainly no more
         * surprising than an ill-guided PMU driver poking at impdef system
         * registers that end in an UNDEF...
         */
        pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
        if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
                return -EINVAL;
        if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

        valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
        return set_id_reg(vcpu, rd, val);
}

        /* Make sure view register and PMU support do match */
        if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
                return -EINVAL;
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
                                      const struct sys_reg_desc *rd)
{
        u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
        u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

        /* We can only differ with PMUver, and anything else is an error */
        val ^= read_id_reg(vcpu, rd);
        val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
        if (val)
                return -EINVAL;
        val &= ~ID_DFR0_EL1_PerfMon_MASK;
        if (kvm_vcpu_has_pmu(vcpu))
                val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

        if (valid_pmu)
                vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
        else
                vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;

        return 0;
        return val;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
                           const struct sys_reg_desc *rd,
                           u64 val)
{
        u8 perfmon, host_perfmon;
        bool valid_pmu;
        u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);

        host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
        if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
                val &= ~ID_DFR0_EL1_PerfMon_MASK;
                perfmon = 0;
        }

        /*
         * Allow DFR0_EL1.PerfMon to be set from userspace as long as
@@ -1430,29 +1534,10 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
         * AArch64 side (as everything is emulated with that), and
         * that this is a PMUv3.
         */
        perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
        if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
            (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
        if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
                return -EINVAL;

        valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);

        /* Make sure view register and PMU support do match */
        if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
                return -EINVAL;

        /* We can only differ with PerfMon, and anything else is an error */
        val ^= read_id_reg(vcpu, rd);
        val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
        if (val)
                return -EINVAL;

        if (valid_pmu)
                vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
        else
                vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);

        return 0;
        return set_id_reg(vcpu, rd, val);
}

/*
@@ -1465,18 +1550,60 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 *val)
{
        /*
         * Avoid locking if the VM has already started, as the ID registers are
         * guaranteed to be invariant at that point.
         */
        if (kvm_vm_has_ran_once(vcpu->kvm)) {
                *val = read_id_reg(vcpu, rd);
                return 0;
        }

        mutex_lock(&vcpu->kvm->arch.config_lock);
        *val = read_id_reg(vcpu, rd);
        mutex_unlock(&vcpu->kvm->arch.config_lock);

        return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 val)
{
        /* This is what we mean by invariant: you can't change it. */
        if (val != read_id_reg(vcpu, rd))
                return -EINVAL;
        u32 id = reg_to_encoding(rd);
        int ret;

        return 0;
        mutex_lock(&vcpu->kvm->arch.config_lock);

        /*
         * Once the VM has started the ID registers are immutable. Reject any
         * write that does not match the final register value.
         */
        if (kvm_vm_has_ran_once(vcpu->kvm)) {
                if (val != read_id_reg(vcpu, rd))
                        ret = -EBUSY;
                else
                        ret = 0;

                mutex_unlock(&vcpu->kvm->arch.config_lock);
                return ret;
        }

        ret = arm64_check_features(vcpu, rd, val);
        if (!ret)
                IDREG(vcpu->kvm, id) = val;

        mutex_unlock(&vcpu->kvm->arch.config_lock);

        /*
         * arm64_check_features() returns -E2BIG to indicate the register's
         * feature set is a superset of the maximally-allowed register value.
         * While it would be nice to precisely describe this to userspace, the
         * existing UAPI for KVM_SET_ONE_REG has it that invalid register
         * writes return -EINVAL.
         */
        if (ret == -E2BIG)
                ret = -EINVAL;
        return ret;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1516,7 +1643,7 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
        u64 clidr;
@@ -1564,6 +1691,8 @@ static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
        clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

        __vcpu_sys_reg(vcpu, r->reg) = clidr;

        return __vcpu_sys_reg(vcpu, r->reg);
}

static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1663,6 +1792,17 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
        .visibility = elx2_visibility, \
}

/*
 * Since reset() callback and field val are not used for idregs, they will be
 * used for specific purposes for idregs.
 * The reset() would return KVM sanitised register value. The value would be the
 * same as the host kernel sanitised value if there is no KVM sanitisation.
 * The val would be used as a mask indicating writable fields for the idreg.
 * Only bits with 1 are writable from userspace. This mask might not be
 * necessary in the future whenever all ID registers are enabled as writable
 * from userspace.
 */

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
        SYS_DESC(SYS_##name), \
@@ -1670,6 +1810,8 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
        .get_user = get_id_reg, \
        .set_user = set_id_reg, \
        .visibility = id_visibility, \
        .reset = kvm_read_sanitised_id_reg, \
        .val = 0, \
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
@@ -1679,6 +1821,8 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
        .get_user = get_id_reg, \
        .set_user = set_id_reg, \
        .visibility = aa32_id_visibility, \
        .reset = kvm_read_sanitised_id_reg, \
        .val = 0, \
}

/*
@@ -1691,7 +1835,9 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
        .access = access_id_reg, \
        .get_user = get_id_reg, \
        .set_user = set_id_reg, \
        .visibility = raz_visibility \
        .visibility = raz_visibility, \
        .reset = kvm_read_sanitised_id_reg, \
        .val = 0, \
}

/*
@@ -1705,6 +1851,8 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
        .get_user = get_id_reg, \
        .set_user = set_id_reg, \
        .visibility = raz_visibility, \
        .reset = kvm_read_sanitised_id_reg, \
        .val = 0, \
}

static bool access_sp_el1(struct kvm_vcpu *vcpu,
@@ -1806,9 +1954,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        /* CRm=1 */
        AA32_ID_SANITISED(ID_PFR0_EL1),
        AA32_ID_SANITISED(ID_PFR1_EL1),
        { SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
          .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
          .visibility = aa32_id_visibility, },
        { SYS_DESC(SYS_ID_DFR0_EL1),
          .access = access_id_reg,
          .get_user = get_id_reg,
          .set_user = set_id_dfr0_el1,
          .visibility = aa32_id_visibility,
          .reset = read_sanitised_id_dfr0_el1,
          .val = ID_DFR0_EL1_PerfMon_MASK, },
        ID_HIDDEN(ID_AFR0_EL1),
        AA32_ID_SANITISED(ID_MMFR0_EL1),
        AA32_ID_SANITISED(ID_MMFR1_EL1),
@@ -1837,8 +1989,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {

        /* AArch64 ID registers */
        /* CRm=4 */
        { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
          .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
        { SYS_DESC(SYS_ID_AA64PFR0_EL1),
          .access = access_id_reg,
          .get_user = get_id_reg,
          .set_user = set_id_reg,
          .reset = read_sanitised_id_aa64pfr0_el1,
          .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
        ID_SANITISED(ID_AA64PFR1_EL1),
        ID_UNALLOCATED(4,2),
        ID_UNALLOCATED(4,3),
@@ -1848,8 +2004,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        ID_UNALLOCATED(4,7),

        /* CRm=5 */
        { SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
          .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
        { SYS_DESC(SYS_ID_AA64DFR0_EL1),
          .access = access_id_reg,
          .get_user = get_id_reg,
          .set_user = set_id_aa64dfr0_el1,
          .reset = read_sanitised_id_aa64dfr0_el1,
          .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
        ID_SANITISED(ID_AA64DFR1_EL1),
        ID_UNALLOCATED(5,2),
        ID_UNALLOCATED(5,3),
@@ -2237,6 +2397,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};

static const struct sys_reg_desc *first_idreg;

static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
@@ -2927,6 +3089,28 @@ static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
        return false;
}

static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
{
        const struct sys_reg_desc *idreg = first_idreg;
        u32 id = reg_to_encoding(idreg);
        struct kvm *kvm = vcpu->kvm;

        if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
                return;

        lockdep_assert_held(&kvm->arch.config_lock);

        /* Initialize all idregs */
        while (is_id_reg(id)) {
                IDREG(kvm, id) = idreg->reset(vcpu, idreg);

                idreg++;
                id = reg_to_encoding(idreg);
        }

        set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
@@ -2938,9 +3122,17 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
        unsigned long i;

        for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
                if (sys_reg_descs[i].reset)
                        sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
        kvm_reset_id_regs(vcpu);

        for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
                const struct sys_reg_desc *r = &sys_reg_descs[i];

                if (is_id_reg(reg_to_encoding(r)))
                        continue;

                if (r->reset)
                        r->reset(vcpu, r);
        }
}

/**
@@ -3041,19 +3233,21 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
 */

#define FUNCTION_INVARIANT(reg) \
        static void get_##reg(struct kvm_vcpu *v, \
        static u64 get_##reg(struct kvm_vcpu *v, \
                              const struct sys_reg_desc *r) \
        { \
                ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
                return ((struct sys_reg_desc *)r)->val; \
        }

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
        ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
        return ((struct sys_reg_desc *)r)->val;
}

/* ->val is filled in by kvm_sys_reg_table_init() */
@@ -3345,6 +3539,7 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)

int __init kvm_sys_reg_table_init(void)
{
        struct sys_reg_params params;
        bool valid = true;
        unsigned int i;
@@ -3363,5 +3558,11 @@ int __init kvm_sys_reg_table_init(void)
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

        /* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
        params = encoding_to_params(SYS_ID_PFR0_EL1);
        first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
        if (!first_idreg)
                return -EINVAL;

        return 0;
}
arch/arm64/kvm/sys_regs.h
@@ -27,6 +27,13 @@ struct sys_reg_params {
        bool is_write;
};

#define encoding_to_params(reg) \
        ((struct sys_reg_params){ .Op0 = sys_reg_Op0(reg), \
                                  .Op1 = sys_reg_Op1(reg), \
                                  .CRn = sys_reg_CRn(reg), \
                                  .CRm = sys_reg_CRm(reg), \
                                  .Op2 = sys_reg_Op2(reg) })

#define esr_sys64_to_params(esr) \
        ((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3, \
                                  .Op1 = ((esr) >> 14) & 0x7, \
@@ -64,13 +71,16 @@ struct sys_reg_desc {
                       struct sys_reg_params *,
                       const struct sys_reg_desc *);

        /* Initialization for vcpu. */
        void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
        /*
         * Initialization for vcpu. Return initialized value, or KVM
         * sanitized value for ID registers.
         */
        u64 (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);

        /* Index into sys_reg[], or 0 if we don't need to save it. */
        int reg;

        /* Value (usually reset value) */
        /* Value (usually reset value), or write mask for idregs */
        u64 val;

        /* Custom get/set_user functions, fallback to generic if NULL */
@@ -123,19 +133,21 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
}

/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
static inline u64 reset_unknown(struct kvm_vcpu *vcpu,
                                 const struct sys_reg_desc *r)
{
        BUG_ON(!r->reg);
        BUG_ON(r->reg >= NR_SYS_REGS);
        __vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
        return __vcpu_sys_reg(vcpu, r->reg);
}

static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        BUG_ON(!r->reg);
        BUG_ON(r->reg >= NR_SYS_REGS);
        __vcpu_sys_reg(vcpu, r->reg) = r->val;
        return __vcpu_sys_reg(vcpu, r->reg);
}

static inline unsigned int sysreg_visibility(const struct kvm_vcpu *vcpu,
include/kvm/arm_pmu.h
@@ -92,8 +92,12 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
/*
 * Evaluates as true when emulating PMUv3p5, and false otherwise.
 */
#define kvm_pmu_is_3p5(vcpu) \
        (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
#define kvm_pmu_is_3p5(vcpu) ({ \
        u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
        u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
        \
        pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
})

u8 kvm_arm_pmu_get_pmuver_limit(void);