KVM: nSVM: Swap the parameter order for svm_copy_vmrun_state()/svm_copy_vmloadsave_state()
Make svm_copy_vmrun_state()/svm_copy_vmloadsave_state() interface match
'memcpy(dest, src)' to avoid any confusion.

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210719090322.625277-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9a9e74819b
commit 2bb16bea5f
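As context for the convention the patch adopts, here is a minimal standalone sketch (not the kernel code itself; struct save_area, its fields, and copy_save_area() are hypothetical stand-ins for the vmcb_save_area-style state): the destination parameter comes first and the source second, so every call site reads in the same direction as a plain memcpy(dest, src, n).

/*
 * Hypothetical illustration of the memcpy(dest, src) argument order.
 * The types and field names here are stand-ins, not the KVM structures.
 */
#include <stdio.h>
#include <string.h>

struct save_area {
	unsigned long es;
	unsigned long cs;
};

/* Destination first, source second -- mirrors memcpy(dest, src, n). */
static void copy_save_area(struct save_area *to, const struct save_area *from)
{
	to->es = from->es;
	to->cs = from->cs;
}

int main(void)
{
	struct save_area src = { .es = 0x10, .cs = 0x08 };
	struct save_area dst;

	memset(&dst, 0, sizeof(dst));
	copy_save_area(&dst, &src);	/* reads like memcpy(&dst, &src, ...) */
	printf("es=%#lx cs=%#lx\n", dst.es, dst.cs);
	return 0;
}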
@@ -702,8 +702,8 @@ out:
 }
 
 /* Copy state save area fields which are handled by VMRUN */
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-			  struct vmcb_save_area *to_save)
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+			  struct vmcb_save_area *from_save)
 {
 	to_save->es = from_save->es;
 	to_save->cs = from_save->cs;
@@ -722,7 +722,7 @@ void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
 	to_save->cpl = 0;
 }
 
-void svm_copy_vmloadsave_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
 	to_vmcb->save.fs = from_vmcb->save.fs;
 	to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1385,7 +1385,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
 
-	svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
+	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
 	nested_load_control_from_vmcb12(svm, ctl);
 
 	svm_switch_vmcb(svm, &svm->nested.vmcb02);
@@ -2147,11 +2147,11 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
 	ret = kvm_skip_emulated_instruction(vcpu);
 
 	if (vmload) {
-		svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+		svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
 		svm->sysenter_eip_hi = 0;
 		svm->sysenter_esp_hi = 0;
 	} else {
-		svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
+		svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
 	}
 
 	kvm_vcpu_unmap(vcpu, &map, true);
@@ -4345,8 +4345,8 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 
 		BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-		svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-				     map_save.hva + 0x400);
+		svm_copy_vmrun_state(map_save.hva + 0x400,
+				     &svm->vmcb01.ptr->save);
 
 		kvm_vcpu_unmap(vcpu, &map_save, true);
 	}
@@ -4394,8 +4394,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
				 &map_save) == -EINVAL)
			return 1;
 
-		svm_copy_vmrun_state(map_save.hva + 0x400,
-				     &svm->vmcb01.ptr->save);
+		svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+				     map_save.hva + 0x400);
 
 		kvm_vcpu_unmap(vcpu, &map_save, true);
 	}
@@ -464,9 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-			  struct vmcb_save_area *to_save);
-void svm_copy_vmloadsave_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+			  struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
 
 static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)