Merge branch kvm-arm64/mpidr-reset into kvmarm-master/next
* kvm-arm64/mpidr-reset:
  : .
  : Fixes for CLIDR_EL1 and MPIDR_EL1 being accidentally mutable across
  : a vcpu reset, courtesy of Oliver. From the cover letter:
  :
  : "For VM-wide feature ID registers we ensure they get initialized once for
  : the lifetime of a VM. On the other hand, vCPU-local feature ID registers
  : get re-initialized on every vCPU reset, potentially clobbering the
  : values userspace set up.
  :
  : MPIDR_EL1 and CLIDR_EL1 are the only registers in this space that we
  : allow userspace to modify for now. Clobbering the value of MPIDR_EL1 has
  : some disastrous side effects as the compressed index used by the
  : MPIDR-to-vCPU lookup table assumes MPIDR_EL1 is immutable after KVM_RUN.
  :
  : Series + reproducer test case to address the problem of KVM wiping out
  : userspace changes to these registers. Note that there are still some
  : differences between VM and vCPU scoped feature ID registers from the
  : perspective of userspace. We do not allow the value of VM-scope
  : registers to change after KVM_RUN, but vCPU registers remain mutable."
  : .
  KVM: selftests: arm64: Test vCPU-scoped feature ID registers
  KVM: selftests: arm64: Test that feature ID regs survive a reset
  KVM: selftests: arm64: Store expected register value in set_id_regs
  KVM: selftests: arm64: Rename helper in set_id_regs to imply VM scope
  KVM: arm64: Only reset vCPU-scoped feature ID regs once
  KVM: arm64: Reset VM feature ID regs from kvm_reset_sys_regs()
  KVM: arm64: Rename is_id_reg() to imply VM scope

Signed-off-by: Marc Zyngier <maz@kernel.org>
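The failure mode is easiest to see from userspace. Below is a minimal illustrative sketch, not taken from the series: VM/vCPU setup and error handling are elided, and vcpu_fd is a hypothetical file descriptor obtained via KVM_CREATE_VCPU + KVM_ARM_VCPU_INIT. It shows the round-trip the reproducer exercises: write MPIDR_EL1 with KVM_SET_ONE_REG, reset the vCPU, and expect the value to survive. ARM64_SYS_REG() and struct kvm_one_reg are the stock KVM uapi; MPIDR_EL1 encodes as (Op0=3, Op1=0, CRn=0, CRm=0, Op2=5).

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    #define MPIDR_EL1_ID	ARM64_SYS_REG(3, 0, 0, 0, 5)	/* Op0, Op1, CRn, CRm, Op2 */

    /* Sketch only: vcpu_fd is assumed to be an initialized vCPU fd. */
    static uint64_t mpidr_override(int vcpu_fd)
    {
    	uint64_t val;
    	struct kvm_one_reg reg = {
    		.id   = MPIDR_EL1_ID,
    		.addr = (uint64_t)&val,
    	};

    	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* read the reset value */
    	val++;					/* choose a different MPIDR */
    	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* userspace override */

    	/*
    	 * Before this series, a later KVM_ARM_VCPU_INIT re-ran the
    	 * register's reset hook and silently discarded this value;
    	 * afterwards, vCPU-scoped ID registers are reset exactly once.
    	 */
    	return val;
    }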
commit eaa46a28d5
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
@@ -1318,6 +1318,8 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
 
 #define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))
 
+#define kvm_vcpu_initialized(v)	vcpu_get_flag(vcpu, VCPU_INITIALIZED)
+
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 extern phys_addr_t hyp_mem_base;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
@@ -698,11 +698,6 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
 }
 #endif
 
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
-	return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
-}
-
 static void kvm_init_mpidr_data(struct kvm *kvm)
 {
 	struct kvm_mpidr_data *data = NULL;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
@@ -1568,17 +1568,31 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	return IDREG(vcpu->kvm, reg_to_encoding(r));
 }
 
+static bool is_feature_id_reg(u32 encoding)
+{
+	return (sys_reg_Op0(encoding) == 3 &&
+		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
+		sys_reg_CRn(encoding) == 0 &&
+		sys_reg_CRm(encoding) <= 7);
+}
+
 /*
  * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
- * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
+ * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
+ * registers KVM maintains on a per-VM basis.
 */
-static inline bool is_id_reg(u32 id)
+static inline bool is_vm_ftr_id_reg(u32 id)
 {
 	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
 		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
 		sys_reg_CRm(id) < 8);
 }
 
+static inline bool is_vcpu_ftr_id_reg(u32 id)
+{
+	return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
+}
+
 static inline bool is_aa32_id_reg(u32 id)
 {
 	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
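A worked example of how the two predicates above split the space (derived from the code, not part of the patch):

    /*
     *   MPIDR_EL1 = (Op0=3, Op1=0, CRn=0, CRm=0, Op2=5)
     *	is_feature_id_reg():  true  (Op1 == 0 < 2, CRn == 0, CRm <= 7)
     *	is_vm_ftr_id_reg():   false (CRm == 0 is outside 1..7)
     *	=> vCPU-scoped
     *
     *   CLIDR_EL1 = (Op0=3, Op1=1, CRn=0, CRm=0, Op2=1)
     *	is_feature_id_reg():  true  (Op1 == 1 < 2)
     *	is_vm_ftr_id_reg():   false (Op1 != 0)
     *	=> vCPU-scoped
     *
     *   ID_AA64PFR0_EL1 = (Op0=3, Op1=0, CRn=0, CRm=4, Op2=0)
     *	is_vm_ftr_id_reg():   true  (CRm == 4 is within 1..7)
     *	=> VM-scoped, initialized once per VM
     */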
@@ -3510,26 +3524,25 @@ void kvm_sys_regs_create_debugfs(struct kvm *kvm)
 			    &idregs_debug_fops);
 }
 
-static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
+static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
 {
-	const struct sys_reg_desc *idreg = first_idreg;
-	u32 id = reg_to_encoding(idreg);
+	u32 id = reg_to_encoding(reg);
 	struct kvm *kvm = vcpu->kvm;
 
 	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
 		return;
 
 	lockdep_assert_held(&kvm->arch.config_lock);
+	IDREG(kvm, id) = reg->reset(vcpu, reg);
+}
 
-	/* Initialize all idregs */
-	while (is_id_reg(id)) {
-		IDREG(kvm, id) = idreg->reset(vcpu, idreg);
+static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
+				  const struct sys_reg_desc *reg)
+{
+	if (kvm_vcpu_initialized(vcpu))
+		return;
 
-		idreg++;
-		id = reg_to_encoding(idreg);
-	}
-
-	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+	reg->reset(vcpu, reg);
 }
 
 /**
@@ -3541,19 +3554,24 @@ static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
  */
 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long i;
 
-	kvm_reset_id_regs(vcpu);
-
 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
 		const struct sys_reg_desc *r = &sys_reg_descs[i];
 
-		if (is_id_reg(reg_to_encoding(r)))
+		if (!r->reset)
 			continue;
 
-		if (r->reset)
+		if (is_vm_ftr_id_reg(reg_to_encoding(r)))
+			reset_vm_ftr_id_reg(vcpu, r);
+		else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
+			reset_vcpu_ftr_id_reg(vcpu, r);
+		else
 			r->reset(vcpu, r);
 	}
+
+	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
 }
 
 /**
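Taken together, the rewritten loop gives each descriptor one of three behaviors (a summary derived from the code above, not text from the series):

    /*
     *	r->reset == NULL   -> skipped entirely
     *	VM-scoped ID reg   -> reset_vm_ftr_id_reg(); a no-op once
     *	                      KVM_ARCH_FLAG_ID_REGS_INITIALIZED is set, so
     *	                      these are initialized once per VM lifetime
     *	vCPU-scoped ID reg -> reset_vcpu_ftr_id_reg(); a no-op after the
     *	                      vCPU's first KVM_ARM_VCPU_INIT, preserving
     *	                      userspace writes across later resets
     *	everything else    -> r->reset() on every vCPU reset, as before
     */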
@@ -3979,14 +3997,6 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 	 sys_reg_CRm(r),						\
 	 sys_reg_Op2(r))
 
-static bool is_feature_id_reg(u32 encoding)
-{
-	return (sys_reg_Op0(encoding) == 3 &&
-		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
-		sys_reg_CRn(encoding) == 0 &&
-		sys_reg_CRm(encoding) <= 7);
-}
-
 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
 {
 	const void *zero_page = page_to_virt(ZERO_PAGE(0));
@@ -4015,7 +4025,7 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
 		 * compliant with a given revision of the architecture, but the
 		 * RES0/RES1 definitions allow us to do that.
 		 */
-		if (is_id_reg(encoding)) {
+		if (is_vm_ftr_id_reg(encoding)) {
 			if (!reg->val ||
 			    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0()))
 				continue;
diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -327,8 +327,8 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 	return ftr;
 }
 
-static void test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
-				 const struct reg_ftr_bits *ftr_bits)
+static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
+				     const struct reg_ftr_bits *ftr_bits)
 {
 	uint8_t shift = ftr_bits->shift;
 	uint64_t mask = ftr_bits->mask;
@@ -346,6 +346,8 @@ static void test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
 	vcpu_set_reg(vcpu, reg, val);
 	vcpu_get_reg(vcpu, reg, &new_val);
 	TEST_ASSERT_EQ(new_val, val);
+
+	return new_val;
 }
 
 static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
@@ -374,7 +376,15 @@ static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
 	TEST_ASSERT_EQ(val, old_val);
 }
 
-static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
+static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+
+#define encoding_to_range_idx(encoding)						\
+	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding),	\
+				     sys_reg_CRn(encoding), sys_reg_CRm(encoding),	\
+				     sys_reg_Op2(encoding))
+
+
+static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
 {
 	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 	struct reg_mask_range range = {
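encoding_to_range_idx() is a thin wrapper around the uapi index macro. For orientation, a sketch of the compression it performs (the authoritative definition lives in arch/arm64/include/uapi/asm/kvm.h; the reproduction below is from memory and illustrative only):

    /*
     * Op1 only takes the values {0, 1, 3} in this space; folding 3 onto 2
     * yields a dense 0..2 range, and CRm and Op2 contribute 3 bits each,
     * giving 3 * 8 * 8 = 192 slots (KVM_ARM_FEATURE_ID_RANGE_SIZE).
     *
     *	__u64 __op1 = (op1) & 3;
     *	__op1 -= (__op1 == 3);			/* map Op1 == 3 to index 2 */
     *	idx = __op1 << 6 | ((crm) & 7) << 3 | (op2);
     *
     * e.g. MPIDR_EL1 (3, 0, 0, 0, 5) lands at 0 << 6 | 0 << 3 | 5 = 5.
     */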
@@ -398,9 +408,7 @@ static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
 		int idx;
 
 		/* Get the index to masks array for the idreg */
-		idx = KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(reg_id), sys_reg_Op1(reg_id),
-						   sys_reg_CRn(reg_id), sys_reg_CRm(reg_id),
-						   sys_reg_Op2(reg_id));
+		idx = encoding_to_range_idx(reg_id);
 
 		for (int j = 0; ftr_bits[j].type != FTR_END; j++) {
 			/* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */
@@ -414,7 +422,9 @@ static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
 			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
 
 			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);
-			test_reg_set_success(vcpu, reg, &ftr_bits[j]);
+
+			test_reg_vals[idx] = test_reg_set_success(vcpu, reg,
+								  &ftr_bits[j]);
 
 			ksft_test_result_pass("%s\n", ftr_bits[j].name);
 		}
@@ -425,7 +435,6 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 {
 	bool done = false;
 	struct ucall uc;
-	uint64_t val;
 
 	while (!done) {
 		vcpu_run(vcpu);
@@ -436,8 +445,8 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 			break;
 		case UCALL_SYNC:
 			/* Make sure the written values are seen by guest */
-			vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(uc.args[2]), &val);
-			TEST_ASSERT_EQ(val, uc.args[3]);
+			TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
+				       uc.args[3]);
 			break;
 		case UCALL_DONE:
 			done = true;
@@ -448,13 +457,85 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 	}
 }
 
+/* Politely lifted from arch/arm64/include/asm/cache.h */
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level)	\
+	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+static void test_clidr(struct kvm_vcpu *vcpu)
+{
+	uint64_t clidr;
+	int level;
+
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), &clidr);
+
+	/* find the first empty level in the cache hierarchy */
+	for (level = 1; level < 7; level++) {
+		if (!CLIDR_CTYPE(clidr, level))
+			break;
+	}
+
+	/*
+	 * If you have a mind-boggling 7 levels of cache, congratulations, you
+	 * get to fix this.
+	 */
+	TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");
+
+	/* stick in a unified cache level */
+	clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);
+
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), clidr);
+	test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
+}
+
+static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
+{
+	u64 val;
+
+	test_clidr(vcpu);
+
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val);
+	val++;
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
+
+	test_reg_vals[encoding_to_range_idx(SYS_MPIDR_EL1)] = val;
+	ksft_test_result_pass("%s\n", __func__);
+}
+
+static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
+{
+	size_t idx = encoding_to_range_idx(encoding);
+	uint64_t observed;
+
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding), &observed);
+	TEST_ASSERT_EQ(test_reg_vals[idx], observed);
+}
+
+static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an
+	 * architectural reset of the vCPU.
+	 */
+	aarch64_vcpu_setup(vcpu, NULL);
+
+	for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
+		test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);
+
+	test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
+
+	ksft_test_result_pass("%s\n", __func__);
+}
+
 int main(void)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	bool aarch64_only;
 	uint64_t val, el0;
-	int ftr_cnt;
+	int test_cnt;
 
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
 
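A quick worked example for the CLIDR manipulation in test_clidr() above (example numbers only, not from the series):

    /*
     * Suppose the vCPU reports separate L1 I/D caches and a unified L2,
     * i.e. Ctype1 = 0b011, Ctype2 = 0b100, Ctype3 = 0b000. The scan stops
     * at level = 3, the first empty level, and:
     *
     *	CLIDR_CTYPE_SHIFT(3) = 3 * (3 - 1) = 6
     *	BIT(2) << 6          = 0b100 << 6  = 0x100
     *
     * ORing 0x100 into CLIDR_EL1 advertises Ctype3 = 0b100 (a unified
     * cache) at level 3 -- a harmless, deliberate deviation from the
     * host value that the final reset test expects to survive.
     */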
@@ -467,18 +548,22 @@ int main(void)
 
 	ksft_print_header();
 
-	ftr_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
-		  ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
-		  ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
-		  ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
-		  ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
-		  ARRAY_SIZE(test_regs);
+	test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
+		   ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
+		   ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
+		   ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
+		   ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
+		   ARRAY_SIZE(test_regs) + 2;
 
-	ksft_set_plan(ftr_cnt);
+	ksft_set_plan(test_cnt);
+
+	test_vm_ftr_id_regs(vcpu, aarch64_only);
+	test_vcpu_ftr_id_regs(vcpu);
 
-	test_user_set_reg(vcpu, aarch64_only);
 	test_guest_reg_read(vcpu);
 
+	test_reset_preserves_id_regs(vcpu);
+
 	kvm_vm_free(vm);
 
 	ksft_finished();
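Note on the arithmetic above: the plan grows by two because test_vcpu_ftr_id_regs() and test_reset_preserves_id_regs() each report one additional ksft result on top of the per-feature-field results counted by the ARRAY_SIZE() terms.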