diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 606f5cc579b2..27a9f08e8386 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -314,6 +314,9 @@ struct hv_tsc_emulation_status {
 #define HV_X64_MSR_TSC_REFERENCE_ENABLE		0x00000001
 #define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT	12
 
+/* Number of XMM registers used in hypercall input/output */
+#define HV_HYPERCALL_MAX_XMM_REGISTERS		6
+
 struct hv_nested_enlightenments_control {
 	struct {
 		__u32 directhypercall:1;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 7c7a2da591da..449589e283d6 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -36,6 +36,7 @@
 
 #include "trace.h"
 #include "irq.h"
+#include "fpu.h"
 
 /* "Hv#1" signature */
 #define HYPERV_CPUID_SIGNATURE_EAX	0x31237648
@@ -1640,10 +1641,13 @@ struct kvm_hv_hcall {
 	u16 rep_idx;
 	bool fast;
 	bool rep;
+	sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
 };
 
 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
 {
+	int i;
+	gpa_t gpa;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	struct hv_tlb_flush_ex flush_ex;
@@ -1657,8 +1661,15 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 	bool all_cpus;
 
 	if (!ex) {
-		if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush, sizeof(flush))))
-			return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		if (hc->fast) {
+			flush.address_space = hc->ingpa;
+			flush.flags = hc->outgpa;
+			flush.processor_mask = sse128_lo(hc->xmm[0]);
+		} else {
+			if (unlikely(kvm_read_guest(kvm, hc->ingpa,
+						    &flush, sizeof(flush))))
+				return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		}
 
 		trace_kvm_hv_flush_tlb(flush.processor_mask,
 				       flush.address_space, flush.flags);
@@ -1676,9 +1687,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
 			flush.processor_mask == 0;
 	} else {
-		if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
-					    sizeof(flush_ex))))
-			return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		if (hc->fast) {
+			flush_ex.address_space = hc->ingpa;
+			flush_ex.flags = hc->outgpa;
+			memcpy(&flush_ex.hv_vp_set,
+			       &hc->xmm[0], sizeof(hc->xmm[0]));
+		} else {
+			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
+						    sizeof(flush_ex))))
+				return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		}
 
 		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
 					  flush_ex.hv_vp_set.format,
@@ -1689,20 +1707,28 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 		all_cpus = flush_ex.hv_vp_set.format !=
 			HV_GENERIC_SET_SPARSE_4K;
 
-		sparse_banks_len =
-			bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
-			sizeof(sparse_banks[0]);
+		sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
 
 		if (!sparse_banks_len && !all_cpus)
 			goto ret_success;
 
-		if (!all_cpus &&
-		    kvm_read_guest(kvm,
-				   hc->ingpa + offsetof(struct hv_tlb_flush_ex,
-							hv_vp_set.bank_contents),
-				   sparse_banks,
-				   sparse_banks_len))
-			return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		if (!all_cpus) {
+			if (hc->fast) {
+				if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
+					return HV_STATUS_INVALID_HYPERCALL_INPUT;
+				for (i = 0; i < sparse_banks_len; i += 2) {
+					sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
+					sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
+				}
+			} else {
+				gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
+							   hv_vp_set.bank_contents);
+				if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
+							    sparse_banks_len *
+							    sizeof(sparse_banks[0]))))
+					return HV_STATUS_INVALID_HYPERCALL_INPUT;
+			}
+		}
 	}
 
 	cpumask_clear(&hv_vcpu->tlb_flush);
@@ -1898,6 +1924,29 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *h
 	return HV_STATUS_SUCCESS;
 }
 
+static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
+{
+	switch (hc->code) {
+	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+		return true;
+	}
+
+	return false;
+}
+
+static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
+{
+	int reg;
+
+	kvm_fpu_get();
+	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
+		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
+	kvm_fpu_put();
+}
+
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
 	struct kvm_hv_hcall hc;
@@ -1934,6 +1983,9 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
 	hc.rep = !!(hc.rep_cnt || hc.rep_idx);
 
+	if (hc.fast && is_xmm_fast_hypercall(&hc))
+		kvm_hv_hypercall_read_xmm(&hc);
+
 	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
 			       hc.ingpa, hc.outgpa);
 
@@ -1969,28 +2021,28 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 			kvm_hv_hypercall_complete_userspace;
 		return 0;
 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
-		if (unlikely(hc.fast || !hc.rep_cnt || hc.rep_idx)) {
+		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
 		}
 		ret = kvm_hv_flush_tlb(vcpu, &hc, false);
 		break;
 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
-		if (unlikely(hc.fast || hc.rep)) {
+		if (unlikely(hc.rep)) {
 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
 		}
 		ret = kvm_hv_flush_tlb(vcpu, &hc, false);
 		break;
 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
-		if (unlikely(hc.fast || !hc.rep_cnt || hc.rep_idx)) {
+		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
 		}
 		ret = kvm_hv_flush_tlb(vcpu, &hc, true);
 		break;
 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
-		if (unlikely(hc.fast || hc.rep)) {
+		if (unlikely(hc.rep)) {
 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
 		}
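
For reference, here is a minimal guest-side sketch of how a matching XMM fast
hypercall could be issued. It is not part of the patch: the helper name is
made up, the constants are redefined locally from the Hyper-V TLFS so the
snippet is self-contained, and it assumes an Intel guest (VMCALL; AMD would
use VMMCALL) on a hypervisor that advertises XMM fast hypercall input support
in CPUID.

#include <stdint.h>

/* TLFS hypercall code and the "fast" bit of the hypercall input value. */
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE	0x0002
#define HV_HYPERCALL_FAST_BIT			(1ULL << 16)

/*
 * Hypothetical helper: the register layout mirrors what kvm_hv_flush_tlb()
 * unpacks above -- RDX -> flush.address_space, R8 -> flush.flags, and the
 * low 64 bits of XMM0 -> flush.processor_mask. The hypercall status is
 * returned in RAX.
 */
static inline uint64_t hv_flush_address_space_fast(uint64_t address_space,
						   uint64_t flags,
						   uint64_t processor_mask)
{
	uint64_t control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
			   HV_HYPERCALL_FAST_BIT;
	register uint64_t r8 asm("r8") = flags;
	uint64_t status;

	asm volatile("movq %[mask], %%xmm0\n\t"
		     "vmcall"
		     : "=a" (status)
		     : "c" (control), "d" (address_space), "r" (r8),
		       [mask] "r" (processor_mask)
		     : "xmm0", "memory");
	return status;
}

A kernel-mode caller would additionally need to own the FPU/XMM state around
the call (e.g. kernel_fpu_begin()/kernel_fpu_end() in Linux), which is the
same reason the KVM side wraps its register reads in kvm_fpu_get()/kvm_fpu_put().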