KVM: x86: Rename GPR accessors to make mode-aware variants the defaults

Append raw to the direct variants of kvm_register_read/write(), and
drop the "l" from the mode-aware variants.  I.e. make the mode-aware
variants the default, and make the direct variants scary sounding so as
to discourage use.  Accessing the full 64-bit values irrespective of
mode is rarely the desired behavior.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210422022128.3464144-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Sean Christopherson, 2021-04-21 19:21:28 -07:00
Committer: Paolo Bonzini
parent:    bc9eff67fc
commit:    27b4a9c454
7 changed files with 41 additions and 36 deletions
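
A condensed sketch of the two flavors after the rename, lifted from the helper
hunks further down (bounds checks and register-cache bookkeeping elided, the
explanatory comment added here): the short names are now mode-aware, while the
_raw variants always act on the full 64-bit value.

	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
	{
		unsigned long val = kvm_register_read_raw(vcpu, reg);

		/* Outside 64-bit mode, only the low 32 bits are architecturally visible. */
		return is_64_bit_mode(vcpu) ? val : (u32)val;
	}

	static inline void kvm_register_write(struct kvm_vcpu *vcpu,
					      int reg, unsigned long val)
	{
		if (!is_64_bit_mode(vcpu))
			val = (u32)val;
		return kvm_register_write_raw(vcpu, reg, val);
	}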

@@ -62,7 +62,12 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
 	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+/*
+ * The "raw" register helpers are only for cases where the full 64 bits of a
+ * register are read/written irrespective of current vCPU mode.  In other words,
+ * odds are good you shouldn't be using the raw variants.
+ */
+static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
 {
 	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
 		return 0;
@@ -73,8 +78,8 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 	return vcpu->arch.regs[reg];
 }
 
-static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
-				      unsigned long val)
+static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
+					  unsigned long val)
 {
 	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
 		return;
@@ -85,22 +90,22 @@ static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
 
 static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
 {
-	return kvm_register_read(vcpu, VCPU_REGS_RIP);
+	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
 }
 
 static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
 {
-	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
+	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
 }
 
 static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
 {
-	return kvm_register_read(vcpu, VCPU_REGS_RSP);
+	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
 }
 
 static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
 {
-	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
 }
 
 static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)


@@ -2457,7 +2457,7 @@ static int cr_interception(struct kvm_vcpu *vcpu)
 	err = 0;
 	if (cr >= 16) { /* mov to cr */
 		cr -= 16;
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
 		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
@@ -2503,7 +2503,7 @@ static int cr_interception(struct kvm_vcpu *vcpu)
 			kvm_queue_exception(vcpu, UD_VECTOR);
 			return 1;
 		}
-		kvm_register_writel(vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
 		trace_kvm_cr_read(cr, val);
 	}
 	return kvm_complete_insn_gp(vcpu, err);
@@ -2569,11 +2569,11 @@ static int dr_interception(struct kvm_vcpu *vcpu)
 	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
 	if (dr >= 16) { /* mov to DRn */
 		dr -= 16;
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
 		err = kvm_set_dr(vcpu, dr, val);
 	} else {
 		kvm_get_dr(vcpu, dr, &val);
-		kvm_register_writel(vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
 	}
 
 	return kvm_complete_insn_gp(vcpu, err);


@@ -4619,9 +4619,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 	else if (addr_size == 0)
 		off = (gva_t)sign_extend64(off, 15);
 	if (base_is_valid)
-		off += kvm_register_readl(vcpu, base_reg);
+		off += kvm_register_read(vcpu, base_reg);
 	if (index_is_valid)
-		off += kvm_register_readl(vcpu, index_reg) << scaling;
+		off += kvm_register_read(vcpu, index_reg) << scaling;
 	vmx_get_segment(vcpu, &s, seg_reg);
 
 	/*
@@ -5023,7 +5023,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		return nested_vmx_failInvalid(vcpu);
 
 	/* Decode instruction info and find the field to read */
-	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
+	field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
@@ -5041,7 +5041,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	 * on the guest's mode (32 or 64 bit), not on the given field's length.
 	 */
 	if (instr_info & BIT(10)) {
-		kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
+		kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
 	} else {
 		len = is_64_bit_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5115,7 +5115,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		return nested_vmx_failInvalid(vcpu);
 
 	if (instr_info & BIT(10))
-		value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
+		value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
 	else {
 		len = is_64_bit_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5126,7 +5126,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 			return kvm_handle_memory_failure(vcpu, r, &e);
 	}
 
-	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
+	field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
@@ -5323,7 +5323,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 		return 1;
 
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
 	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
@@ -5403,7 +5403,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return 1;
 
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
 	types = (vmx->nested.msrs.vpid_caps &
 					VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
@@ -5659,7 +5659,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
 		reg = (exit_qualification >> 8) & 15;
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
 		switch (cr) {
 		case 0:
 			if (vmcs12->cr0_guest_host_mask &
@@ -5745,7 +5745,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
 
 	/* Decode instruction info and find the field to access */
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 
 	/* Out-of-range fields always cause a VM exit from L2 to L1 */
 	if (field >> 15)


@@ -5079,7 +5079,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	reg = (exit_qualification >> 8) & 15;
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
 		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
@@ -5121,12 +5121,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		case 3:
 			WARN_ON_ONCE(enable_unrestricted_guest);
 			val = kvm_read_cr3(vcpu);
-			kvm_register_writel(vcpu, reg, val);
+			kvm_register_write(vcpu, reg, val);
 			trace_kvm_cr_read(cr, val);
 			return kvm_skip_emulated_instruction(vcpu);
 		case 8:
 			val = kvm_get_cr8(vcpu);
-			kvm_register_writel(vcpu, reg, val);
+			kvm_register_write(vcpu, reg, val);
 			trace_kvm_cr_read(cr, val);
 			return kvm_skip_emulated_instruction(vcpu);
 		}
@@ -5199,10 +5199,10 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 		unsigned long val;
 
 		kvm_get_dr(vcpu, dr, &val);
-		kvm_register_writel(vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
 		err = 0;
 	} else {
-		err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
+		err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
 	}
 
 out:
@@ -5554,7 +5554,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 	}
 
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
 	if (type > 3) {
 		kvm_inject_gp(vcpu, 0);


@@ -7016,12 +7016,12 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
 
 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
 {
-	return kvm_register_read(emul_to_vcpu(ctxt), reg);
+	return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
 }
 
 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
 {
-	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
+	kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
 }
 
 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
@@ -8701,7 +8701,7 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
 
 	for (i = 0; i < 8; i++)
-		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
+		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
 
 	kvm_get_dr(vcpu, 6, &val);
 	put_smstate(u32, buf, 0x7fcc, (u32)val);
@@ -8747,7 +8747,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 	int i;
 
 	for (i = 0; i < 16; i++)
-		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
+		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
 
 	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
 	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));


@@ -232,19 +232,19 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 	return false;
 }
 
-static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 {
-	unsigned long val = kvm_register_read(vcpu, reg);
+	unsigned long val = kvm_register_read_raw(vcpu, reg);
 
 	return is_64_bit_mode(vcpu) ? val : (u32)val;
 }
 
-static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
-				       int reg, unsigned long val)
+static inline void kvm_register_write(struct kvm_vcpu *vcpu,
+				      int reg, unsigned long val)
 {
 	if (!is_64_bit_mode(vcpu))
 		val = (u32)val;
-	return kvm_register_write(vcpu, reg, val);
+	return kvm_register_write_raw(vcpu, reg, val);
 }
 
 static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)


@@ -673,7 +673,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
 	bool longmode;
 	u64 input, params[6];
 
-	input = (u64)kvm_register_readl(vcpu, VCPU_REGS_RAX);
+	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
 
 	/* Hyper-V hypercalls get bit 31 set in EAX */
 	if ((input & 0x80000000) &&