KVM: arm64: Remove host_cpu_context member from vcpu structure

For a very long time, we have kept this pointer back to the per-cpu
host state, despite having had working per-cpu accessors at EL2
for some time now.
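
For reference, the EL2 accessor resolves the current CPU's copy by
adding the per-CPU offset stashed in TPIDR_EL2 to the symbol's hyp
address; a rough sketch of the idea (not necessarily the exact macro
in this tree) is:

	#define __hyp_this_cpu_ptr(sym)					\
		({							\
			/* hyp VA of the per-cpu symbol... */		\
			void *__ptr = hyp_symbol_addr(sym);		\
			/* ...plus this CPU's offset from TPIDR_EL2 */	\
			__ptr += read_sysreg(tpidr_el2);		\
			(typeof(&sym))__ptr;				\
		})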

Recent investigations have shown that this pointer is easy to abuse
in preemptible context: nothing guarantees that the vcpu is still
running on the CPU the pointer was cached for, which is a sure sign
that it is better off gone. Not to mention that a per-cpu pointer is
faster to access at all times.
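
To illustrate the abuse pattern (a simplified, hypothetical sketch,
not code from this tree):

	/* The cached pointer is only valid for the CPU it was set on: */
	vcpu->arch.host_cpu_context = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	/*
	 * In preemptible context we may since have migrated to another
	 * CPU, so the cached value can name another CPU's host state:
	 */
	struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context; /* stale? */

	/* Resolving the per-cpu copy at the point of use avoids this: */
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;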

Reported-by: Andrew Scull <ascull@google.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 07da1ffaa1
parent b990d37fdf
Author: Marc Zyngier <maz@kernel.org>
Date:   2020-06-05 14:08:13 +01:00

6 files changed, 11 insertions(+), 19 deletions(-)

@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch vcpu_debug_state;
 	struct kvm_guest_debug_arch external_debug_state;
 
-	/* Pointer to host CPU context */
-	struct kvm_cpu_context *host_cpu_context;
-
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
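
For orientation, the per-cpu container whose host_ctxt member the
removed pointer used to alias looked roughly like this at the time
(a sketch from the contemporary definition, not part of this diff):

	struct kvm_host_data {
		struct kvm_cpu_context host_ctxt;
		struct kvm_pmu_events pmu_events;
	};

	typedef struct kvm_host_data kvm_host_data_t;

	DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);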

@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
-	kvm_host_data_t *cpu_data;
 
 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
 	 * We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);

@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);

@@ -532,7 +532,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
 		return false;
 
-	ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	__ptrauth_save_key(ctxt->sys_regs, APIA);
 	__ptrauth_save_key(ctxt->sys_regs, APIB);
 	__ptrauth_save_key(ctxt->sys_regs, APDA);
@@ -703,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;
 
-	host_ctxt = vcpu->arch.host_cpu_context;
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
@@ -808,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 	vcpu = kern_hyp_va(vcpu);
 
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;

@@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+	struct kvm_cpu_context *host_ctxt;
 
 	if (!has_vhe())
 		return;
 
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	__sysreg_save_user_state(host_ctxt);
 
 	/*
@@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+	struct kvm_cpu_context *host_ctxt;
 
 	if (!has_vhe())
 		return;
 
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	deactivate_traps_vhe_put();
 
 	__sysreg_save_el1_state(guest_ctxt);

@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt;
 	struct kvm_host_data *host;
 	u32 events_guest, events_host;
 
 	if (!has_vhe())
 		return;
 
-	host_ctxt = vcpu->arch.host_cpu_context;
-	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt;
 	struct kvm_host_data *host;
 	u32 events_guest, events_host;
 
 	if (!has_vhe())
 		return;
 
-	host_ctxt = vcpu->arch.host_cpu_context;
-	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;