arm64: KVM: Get rid of old vcpu_reg()
Using the old-style vcpu_reg() accessor has proven to be inappropriate and unsafe on ARM64. This patch converts the remaining use cases to the new accessors and completely removes vcpu_reg() on ARM64. Signed-off-by: Pavel Fedin <p.fedin@samsung.com> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
This commit is contained in:
parent
2ec5be3dbf
commit
f6be563abb
@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
|
|||||||
unsigned long context_id;
|
unsigned long context_id;
|
||||||
phys_addr_t target_pc;
|
phys_addr_t target_pc;
|
||||||
|
|
||||||
cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
|
cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
|
||||||
if (vcpu_mode_is_32bit(source_vcpu))
|
if (vcpu_mode_is_32bit(source_vcpu))
|
||||||
cpu_id &= ~((u32) 0);
|
cpu_id &= ~((u32) 0);
|
||||||
|
|
||||||
@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
|
|||||||
return PSCI_RET_INVALID_PARAMS;
|
return PSCI_RET_INVALID_PARAMS;
|
||||||
}
|
}
|
||||||
|
|
||||||
target_pc = *vcpu_reg(source_vcpu, 2);
|
target_pc = vcpu_get_reg(source_vcpu, 2);
|
||||||
context_id = *vcpu_reg(source_vcpu, 3);
|
context_id = vcpu_get_reg(source_vcpu, 3);
|
||||||
|
|
||||||
kvm_reset_vcpu(vcpu);
|
kvm_reset_vcpu(vcpu);
|
||||||
|
|
||||||
@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
|
|||||||
* NOTE: We always update r0 (or x0) because for PSCI v0.1
|
* NOTE: We always update r0 (or x0) because for PSCI v0.1
|
||||||
* the general puspose registers are undefined upon CPU_ON.
|
* the general puspose registers are undefined upon CPU_ON.
|
||||||
*/
|
*/
|
||||||
*vcpu_reg(vcpu, 0) = context_id;
|
vcpu_set_reg(vcpu, 0, context_id);
|
||||||
vcpu->arch.power_off = false;
|
vcpu->arch.power_off = false;
|
||||||
smp_mb(); /* Make sure the above is visible */
|
smp_mb(); /* Make sure the above is visible */
|
||||||
|
|
||||||
@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
|
|||||||
struct kvm *kvm = vcpu->kvm;
|
struct kvm *kvm = vcpu->kvm;
|
||||||
struct kvm_vcpu *tmp;
|
struct kvm_vcpu *tmp;
|
||||||
|
|
||||||
target_affinity = *vcpu_reg(vcpu, 1);
|
target_affinity = vcpu_get_reg(vcpu, 1);
|
||||||
lowest_affinity_level = *vcpu_reg(vcpu, 2);
|
lowest_affinity_level = vcpu_get_reg(vcpu, 2);
|
||||||
|
|
||||||
/* Determine target affinity mask */
|
/* Determine target affinity mask */
|
||||||
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
|
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
|
||||||
@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
|
|||||||
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
|
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
|
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
|
||||||
unsigned long val;
|
unsigned long val;
|
||||||
|
|
||||||
switch (psci_fn) {
|
switch (psci_fn) {
|
||||||
@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
*vcpu_reg(vcpu, 0) = val;
|
vcpu_set_reg(vcpu, 0, val);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
|
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
|
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
|
||||||
unsigned long val;
|
unsigned long val;
|
||||||
|
|
||||||
switch (psci_fn) {
|
switch (psci_fn) {
|
||||||
@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
*vcpu_reg(vcpu, 0) = val;
|
vcpu_set_reg(vcpu, 0, val);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -100,15 +100,10 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* vcpu_reg should always be passed a register number coming from a
|
* vcpu_get_reg and vcpu_set_reg should always be passed a register number
|
||||||
* read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
|
* coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
|
||||||
* with banked registers.
|
* AArch32 with banked registers.
|
||||||
*/
|
*/
|
||||||
static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
|
|
||||||
{
|
|
||||||
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
|
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
|
||||||
u8 reg_num)
|
u8 reg_num)
|
||||||
{
|
{
|
||||||
|
@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
|
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
|
||||||
kvm_vcpu_hvc_get_imm(vcpu));
|
kvm_vcpu_hvc_get_imm(vcpu));
|
||||||
|
|
||||||
ret = kvm_psci_call(vcpu);
|
ret = kvm_psci_call(vcpu);
|
||||||
|
Loading…
Reference in New Issue
Block a user