KVM Xen and pfncache changes for 6.9:

 - Rip out the half-baked support for using gfn_to_pfn caches to manage pages
   that are "mapped" into guests via physical addresses.

 - Add support for using gfn_to_pfn caches with only a host virtual address,
   i.e. to bypass the "gfn" stage of the cache.  The primary use case is
   overlay pages, where the guest may change the gfn used to reference the
   overlay page, but the backing hva+pfn remains the same.

 - Add an ioctl() to allow mapping Xen's shared_info page using an hva instead
   of a gpa, so that userspace doesn't need to reconfigure and invalidate the
   cache/mapping if the guest changes the gpa (but userspace keeps the
   resolved hva the same).

 - When possible, use a single host TSC value when computing the deadline for
   Xen timers in order to improve the accuracy of the timer emulation.

 - Inject pending upcall events when the vCPU software-enables its APIC to fix
   a bug where an upcall can be lost (and to follow Xen's behavior).

 - Fall back to the slow path instead of warning if "fast" IRQ delivery of Xen
   events fails, e.g. if the guest has aliased xAPIC IDs.

 - Extend gfn_to_pfn_cache's mutex to cover (de)activation (in addition to
   refresh), and drop a now-redundant acquisition of xen_lock (that was
   protecting the shared_info cache) to fix a deadlock due to recursively
   acquiring xen_lock.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEKTobbabEP7vbhhN9OlYIJqCjN/0FAmXrblYACgkQOlYIJqCj
N/3K4Q/+KZ8lrnNXvdHNCQdosA5DDXpqUcRzhlTUp82fncpdJ0LqrSMzMots2Eh9
KC0jSPo8EkivF+Epug0+bpQBEaLXzTWhRcS1grePCDz2lBnxoHFSWjvaK2p14KlC
LvxCJZjxyfLKHwKHpSndvO9hVFElCY3mvvE9KRcKeQAmrz1cz+DDMKelo1MuV8D+
GfymhYc+UXpY41+6hQdznx+WoGoXKRameo3iGYuBoJjvKOyl4Wxkx9WSXIxxxuqG
kHxjiWTR/jF1ITJl6PeMrFcGl3cuGKM/UfTOM6W2h6Wi3mhLpXveoVLnqR1kipIj
btSzSVHL7C4WTPwOcyhwPzap+dJmm31c6N0uPScT7r9yhs+q5BDj26vcVcyPZUHo
efIwmsnO2eQvuw+f8C6QqWCPaxvw46N0zxzwgc5uA3jvAC93y0l4v+xlAQsC0wzV
0+BwU00cutH/3t3c/WPD5QcmRLH726VoFuTlaDufpoMU7gBVJ8rzjcusxR+5BKT+
GJcAgZxZhEgvnzmTKd4Ec/mt+xZ2Erd+kV3MKCHvDPyj8jqy8FQ4DAWKGBR+h3WR
rqAs2k8NPHyh3i1a3FL1opmxEGsRS+Cnc6Bi77cj9DxTr22JkgDJEuFR+Ues1z6/
SpE889kt3w5zTo34+lNxNPlIKmO0ICwwhDL6pxJTWU7iWQnKypU=
=GliW
-----END PGP SIGNATURE-----

Merge tag 'kvm-x86-xen-6.9' of https://github.com/kvm-x86/linux into HEAD
commit e9a2bba476
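A minimal userspace sketch of the headline change may help before the diff itself: it probes KVM_CAP_XEN_HVM for the new KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA bit and prefers the HVA-based shared_info attribute, falling back to the legacy GFN-based one. The vm_fd descriptor, the shinfo mapping and the error handling are assumptions of this sketch, not part of the patch; the ioctl and attribute names come from the uAPI changes below.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Sketch: prefer KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA when the capability
     * advertises KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA, otherwise fall back to
     * the legacy GFN-based attribute.  "vm_fd" is an assumed VM file
     * descriptor and "shinfo" the VMM's own mapping of the overlay page.
     */
    static int map_shared_info(int vm_fd, void *shinfo, uint64_t gfn)
    {
    	struct kvm_xen_hvm_attr attr;
    	int xen_caps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);

    	memset(&attr, 0, sizeof(attr));
    	if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
    		/* Fixed host address: survives guest-side remapping. */
    		attr.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
    		attr.u.shared_info.hva = (unsigned long)shinfo;
    	} else {
    		/* Legacy: must be re-issued if the guest moves the page. */
    		attr.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
    		attr.u.shared_info.gfn = gfn;
    	}

    	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
    }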
@@ -372,7 +372,7 @@ The bits in the dirty bitmap are cleared before the ioctl returns, unless
 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
 see the description of the capability.
 
-Note that the Xen shared info page, if configured, shall always be assumed
+Note that the Xen shared_info page, if configured, shall always be assumed
 to be dirty. KVM will not explicitly mark it such.
 
@@ -5487,8 +5487,9 @@ KVM_PV_ASYNC_CLEANUP_PERFORM
 		__u8 long_mode;
 		__u8 vector;
 		__u8 runstate_update_flag;
-		struct {
+		union {
 			__u64 gfn;
+			__u64 hva;
 		} shared_info;
 		struct {
 			__u32 send_port;
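Purely as a hedged illustration of how a VMM fills the structure shown above, here is a minimal snippet selecting the 64-bit ("long mode") shared_info layout; the vm_fd descriptor is an assumption of the example.

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative only: vm_fd is an assumed, already-created KVM VM fd. */
    static int xen_set_long_mode(int vm_fd, int enable)
    {
    	struct kvm_xen_hvm_attr lm = {
    		.type = KVM_XEN_ATTR_TYPE_LONG_MODE,
    		.u.long_mode = !!enable,
    	};

    	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &lm);
    }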
@@ -5516,19 +5517,20 @@ type values:
 
 KVM_XEN_ATTR_TYPE_LONG_MODE
   Sets the ABI mode of the VM to 32-bit or 64-bit (long mode). This
-  determines the layout of the shared info pages exposed to the VM.
+  determines the layout of the shared_info page exposed to the VM.
 
 KVM_XEN_ATTR_TYPE_SHARED_INFO
-  Sets the guest physical frame number at which the Xen "shared info"
+  Sets the guest physical frame number at which the Xen shared_info
   page resides. Note that although Xen places vcpu_info for the first
   32 vCPUs in the shared_info page, KVM does not automatically do so
-  and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
-  explicitly even when the vcpu_info for a given vCPU resides at the
-  "default" location in the shared_info page. This is because KVM may
-  not be aware of the Xen CPU id which is used as the index into the
-  vcpu_info[] array, so may not know the correct default location.
+  and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO or
+  KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA be used explicitly even when
+  the vcpu_info for a given vCPU resides at the "default" location
+  in the shared_info page. This is because KVM may not be aware of
+  the Xen CPU id which is used as the index into the vcpu_info[]
+  array, so may not know the correct default location.
 
-  Note that the shared info page may be constantly written to by KVM;
+  Note that the shared_info page may be constantly written to by KVM;
   it contains the event channel bitmap used to deliver interrupts to
   a Xen guest, amongst other things. It is exempt from dirty tracking
   mechanisms — KVM will not explicitly mark the page as dirty each
@@ -5537,9 +5539,21 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
   any vCPU has been running or any event channel interrupts can be
   routed to the guest.
 
-  Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared info
+  Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared_info
   page.
 
+KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA
+  If the KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA flag is also set in the
+  Xen capabilities, then this attribute may be used to set the
+  userspace address at which the shared_info page resides, which
+  will always be fixed in the VMM regardless of where it is mapped
+  in guest physical address space. This attribute should be used in
+  preference to KVM_XEN_ATTR_TYPE_SHARED_INFO as it avoids
+  unnecessary invalidation of an internal cache when the page is
+  re-mapped in guest physical address space.
+
+  Setting the hva to zero will disable the shared_info page.
+
 KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
   Sets the exception vector used to deliver Xen event channel upcalls.
   This is the HVM-wide vector injected directly by the hypervisor
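To make the caching benefit of KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA concrete, a hypothetical VMM-side handler for the guest re-mapping the overlay page might look like the sketch below; shared_info_moved(), vm_fd and have_hva_attr are inventions of the example, and only the KVM attribute names are real.

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Hypothetical handler invoked when the guest points the shared_info
     * overlay at a new GPA (e.g. via XENMEM_add_to_physmap).  With the
     * HVA-based attribute the kernel mapping is keyed on the unchanged host
     * address, so no KVM call is needed; with the legacy GFN-based attribute
     * the mapping must be re-established.
     */
    static int shared_info_moved(int vm_fd, bool have_hva_attr, uint64_t new_gfn)
    {
    	if (have_hva_attr)
    		return 0;	/* backing hva+pfn unchanged; nothing to do */

    	struct kvm_xen_hvm_attr attr = {
    		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
    		.u.shared_info.gfn = new_gfn,
    	};
    	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
    }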
@@ -5636,6 +5650,21 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
   on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable
   the vcpu_info.
 
+KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA
+  If the KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA flag is also set in the
+  Xen capabilities, then this attribute may be used to set the
+  userspace address of the vcpu_info for a given vCPU. It should
+  only be used when the vcpu_info resides at the "default" location
+  in the shared_info page. In this case it is safe to assume the
+  userspace address will not change, because the shared_info page is
+  an overlay on guest memory and remains at a fixed host address
+  regardless of where it is mapped in guest physical address space
+  and hence unnecessary invalidation of an internal cache may be
+  avoided if the guest memory layout is modified.
+  If the vcpu_info does not reside at the "default" location then
+  it is not guaranteed to remain at the same host address and
+  hence the aforementioned cache invalidation is required.
+
 KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
   Sets the guest physical address of an additional pvclock structure
   for a given vCPU. This is typically used for guest vsyscall support.
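A sketch of the per-vCPU counterpart described above, assuming the vcpu_info really does sit at its default slot in the VMM's fixed shared_info mapping. The vcpu_fd descriptor, the Xen vCPU id and the 64-byte vcpu_info stride are assumptions of the example (the stride matches Xen's 64-bit ABI; a real VMM would take sizeof(struct vcpu_info) from the Xen public headers).

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Assumed size of one struct vcpu_info in the 64-bit Xen ABI. */
    #define VCPU_INFO_SIZE	64

    /*
     * Illustrative only: point a vCPU's vcpu_info at its "default" location
     * inside the VMM's fixed mapping of the shared_info page.  vcpu_fd and
     * xen_vcpu_id (< 32) are assumed to be known to the caller.
     */
    static int set_vcpu_info_hva(int vcpu_fd, void *shinfo, uint32_t xen_vcpu_id)
    {
    	struct kvm_xen_vcpu_attr attr = {
    		.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA,
    		.u.hva = (unsigned long)shinfo + xen_vcpu_id * VCPU_INFO_SIZE,
    	};

    	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
    }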
@ -102,7 +102,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
|
||||
parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr))
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
||||
|
||||
vcpu->arch.pfault_token = parm.token_addr;
|
||||
|
@ -664,7 +664,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_REGION1: {
|
||||
union region1_table_entry rfte;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &rfte.val))
|
||||
return -EFAULT;
|
||||
@ -682,7 +682,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_REGION2: {
|
||||
union region2_table_entry rste;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &rste.val))
|
||||
return -EFAULT;
|
||||
@ -700,7 +700,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_REGION3: {
|
||||
union region3_table_entry rtte;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &rtte.val))
|
||||
return -EFAULT;
|
||||
@ -728,7 +728,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_SEGMENT: {
|
||||
union segment_table_entry ste;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &ste.val))
|
||||
return -EFAULT;
|
||||
@ -748,7 +748,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
|
||||
}
|
||||
}
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &pte.val))
|
||||
return -EFAULT;
|
||||
@ -770,7 +770,7 @@ absolute_address:
|
||||
*prot = PROT_TYPE_IEP;
|
||||
return PGM_PROTECTION;
|
||||
}
|
||||
if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
|
||||
return PGM_ADDRESSING;
|
||||
*gpa = raddr.addr;
|
||||
return 0;
|
||||
@ -957,7 +957,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
|
||||
return rc;
|
||||
} else {
|
||||
gpa = kvm_s390_real_to_abs(vcpu, ga);
|
||||
if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
|
||||
rc = PGM_ADDRESSING;
|
||||
prot = PROT_NONE;
|
||||
}
|
||||
|
@ -2878,7 +2878,7 @@ static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
|
||||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
if (kvm_is_error_gpa(kvm, mop->gaddr)) {
|
||||
if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
|
||||
r = PGM_ADDRESSING;
|
||||
goto out_unlock;
|
||||
}
|
||||
@ -2940,7 +2940,7 @@ static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *m
|
||||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
if (kvm_is_error_gpa(kvm, mop->gaddr)) {
|
||||
if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
|
||||
r = PGM_ADDRESSING;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
@ -149,7 +149,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
|
||||
* first page, since address is 8k aligned and memory pieces are always
|
||||
* at least 1MB aligned and have at least a size of 1MB.
|
||||
*/
|
||||
if (kvm_is_error_gpa(vcpu->kvm, address))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
||||
|
||||
kvm_s390_set_prefix(vcpu, address);
|
||||
@ -464,7 +464,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
|
||||
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
|
||||
addr = kvm_s390_real_to_abs(vcpu, addr);
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, addr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
||||
/*
|
||||
* We don't expect errors on modern systems, and do not care
|
||||
|
@ -172,7 +172,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
|
||||
* first page, since address is 8k aligned and memory pieces are always
|
||||
* at least 1MB aligned and have at least a size of 1MB.
|
||||
*/
|
||||
if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) {
|
||||
*reg &= 0xffffffff00000000UL;
|
||||
*reg |= SIGP_STATUS_INVALID_PARAMETER;
|
||||
return SIGP_CC_STATUS_STORED;
|
||||
|
@@ -549,6 +549,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND		(1 << 5)
 #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG	(1 << 6)
 #define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE	(1 << 7)
+#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA	(1 << 8)
 
 struct kvm_xen_hvm_config {
 	__u32 flags;

@@ -567,9 +568,10 @@ struct kvm_xen_hvm_attr {
 		__u8 long_mode;
 		__u8 vector;
 		__u8 runstate_update_flag;
-		struct {
+		union {
 			__u64 gfn;
 #define KVM_XEN_INVALID_GFN ((__u64)-1)
+			__u64 hva;
 		} shared_info;
 		struct {
 			__u32 send_port;

@@ -611,6 +613,8 @@ struct kvm_xen_hvm_attr {
 #define KVM_XEN_ATTR_TYPE_XEN_VERSION		0x4
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
 #define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG	0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
+#define KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA	0x6
 
 struct kvm_xen_vcpu_attr {
 	__u16 type;

@@ -618,6 +622,7 @@ struct kvm_xen_vcpu_attr {
 	union {
 		__u64 gpa;
 #define KVM_XEN_INVALID_GPA ((__u64)-1)
+		__u64 hva;
 		__u64 pad[8];
 		struct {
 			__u64 state;

@@ -648,6 +653,8 @@ struct kvm_xen_vcpu_attr {
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID		0x6
 #define KVM_XEN_VCPU_ATTR_TYPE_TIMER		0x7
 #define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR	0x8
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA	0x9
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
@@ -41,6 +41,7 @@
 #include "ioapic.h"
 #include "trace.h"
 #include "x86.h"
+#include "xen.h"
 #include "cpuid.h"
 #include "hyperv.h"
 #include "smm.h"

@@ -502,8 +503,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 	}
 
 	/* Check if there are APF page ready requests pending */
-	if (enabled)
+	if (enabled) {
 		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
+		kvm_xen_sw_enable_lapic(apic->vcpu);
+	}
 }
 
 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
@ -2854,7 +2854,11 @@ static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
|
||||
return v * clock->mult;
|
||||
}
|
||||
|
||||
static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
|
||||
/*
|
||||
* As with get_kvmclock_base_ns(), this counts from boot time, at the
|
||||
* frequency of CLOCK_MONOTONIC_RAW (hence adding gtos->offs_boot).
|
||||
*/
|
||||
static int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
|
||||
{
|
||||
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
||||
unsigned long seq;
|
||||
@ -2873,6 +2877,29 @@ static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
|
||||
return mode;
|
||||
}
|
||||
|
||||
/*
|
||||
* This calculates CLOCK_MONOTONIC at the time of the TSC snapshot, with
|
||||
* no boot time offset.
|
||||
*/
|
||||
static int do_monotonic(s64 *t, u64 *tsc_timestamp)
|
||||
{
|
||||
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
||||
unsigned long seq;
|
||||
int mode;
|
||||
u64 ns;
|
||||
|
||||
do {
|
||||
seq = read_seqcount_begin(>od->seq);
|
||||
ns = gtod->clock.base_cycles;
|
||||
ns += vgettsc(>od->clock, tsc_timestamp, &mode);
|
||||
ns >>= gtod->clock.shift;
|
||||
ns += ktime_to_ns(gtod->clock.offset);
|
||||
} while (unlikely(read_seqcount_retry(>od->seq, seq)));
|
||||
*t = ns;
|
||||
|
||||
return mode;
|
||||
}
|
||||
|
||||
static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
|
||||
{
|
||||
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
||||
@ -2894,18 +2921,42 @@ static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
|
||||
return mode;
|
||||
}
|
||||
|
||||
/* returns true if host is using TSC based clocksource */
|
||||
/*
|
||||
* Calculates the kvmclock_base_ns (CLOCK_MONOTONIC_RAW + boot time) and
|
||||
* reports the TSC value from which it does so. Returns true if host is
|
||||
* using TSC based clocksource.
|
||||
*/
|
||||
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
|
||||
{
|
||||
/* checked again under seqlock below */
|
||||
if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
|
||||
return false;
|
||||
|
||||
return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
|
||||
tsc_timestamp));
|
||||
return gtod_is_based_on_tsc(do_kvmclock_base(kernel_ns,
|
||||
tsc_timestamp));
|
||||
}
|
||||
|
||||
/* returns true if host is using TSC based clocksource */
|
||||
/*
|
||||
* Calculates CLOCK_MONOTONIC and reports the TSC value from which it did
|
||||
* so. Returns true if host is using TSC based clocksource.
|
||||
*/
|
||||
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
|
||||
{
|
||||
/* checked again under seqlock below */
|
||||
if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
|
||||
return false;
|
||||
|
||||
return gtod_is_based_on_tsc(do_monotonic(kernel_ns,
|
||||
tsc_timestamp));
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculates CLOCK_REALTIME and reports the TSC value from which it did
|
||||
* so. Returns true if host is using TSC based clocksource.
|
||||
*
|
||||
* DO NOT USE this for anything related to migration. You want CLOCK_TAI
|
||||
* for that.
|
||||
*/
|
||||
static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
|
||||
u64 *tsc_timestamp)
|
||||
{
|
||||
@ -3152,7 +3203,7 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
|
||||
|
||||
guest_hv_clock->version = ++vcpu->hv_clock.version;
|
||||
|
||||
mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
|
||||
kvm_gpc_mark_dirty_in_slot(gpc);
|
||||
read_unlock_irqrestore(&gpc->lock, flags);
|
||||
|
||||
trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
|
||||
@ -4674,7 +4725,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
KVM_XEN_HVM_CONFIG_SHARED_INFO |
|
||||
KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
|
||||
KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
|
||||
KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
|
||||
KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE |
|
||||
KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA;
|
||||
if (sched_info_on())
|
||||
r |= KVM_XEN_HVM_CONFIG_RUNSTATE |
|
||||
KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG;
|
||||
@ -12027,7 +12079,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.regs_avail = ~0;
|
||||
vcpu->arch.regs_dirty = ~0;
|
||||
|
||||
kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
|
||||
kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
|
||||
|
||||
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
|
||||
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
||||
|
@@ -294,6 +294,7 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
 u64 get_kvmclock_ns(struct kvm *kvm);
 uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
+bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);
 
 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 			gva_t addr, void *val, unsigned int bytes,
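The helper exported above, kvm_get_monotonic_and_clockread(), is what lets the Xen timer code derive its deadline from a single host TSC sample. Below is a condensed, non-authoritative sketch of that pairing, modelled on the kvm_xen_start_timer() changes later in this diff; the fallback paths and the "Linux workaround" are deliberately omitted.

    /*
     * Sketch: take one host TSC sample together with CLOCK_MONOTONIC, scale
     * that same TSC value into the guest's kvmclock domain, and express the
     * guest's absolute timeout as a delta on the CLOCK_MONOTONIC timeline.
     */
    static void xen_timer_deadline_sketch(struct kvm_vcpu *vcpu, u64 guest_abs)
    {
    	u64 host_tsc, guest_tsc, guest_now;
    	s64 kernel_now;

    	if (!kvm_get_monotonic_and_clockread(&kernel_now, &host_tsc))
    		return;	/* host clocksource is not TSC-based */

    	/* Calculate the guest kvmclock "now" as the guest itself would. */
    	guest_tsc = kvm_read_l1_tsc(vcpu, host_tsc);
    	guest_now = __pvclock_read_cycles(&vcpu->arch.hv_clock, guest_tsc);

    	hrtimer_start(&vcpu->arch.xen.timer,
    		      ktime_add_ns(kernel_now, guest_abs - guest_now),
    		      HRTIMER_MODE_ABS_HARD);
    }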
@ -10,7 +10,7 @@
|
||||
#include "x86.h"
|
||||
#include "xen.h"
|
||||
#include "hyperv.h"
|
||||
#include "lapic.h"
|
||||
#include "irq.h"
|
||||
|
||||
#include <linux/eventfd.h>
|
||||
#include <linux/kvm_host.h>
|
||||
@ -24,6 +24,7 @@
|
||||
#include <xen/interface/sched.h>
|
||||
|
||||
#include <asm/xen/cpuid.h>
|
||||
#include <asm/pvclock.h>
|
||||
|
||||
#include "cpuid.h"
|
||||
#include "trace.h"
|
||||
@ -34,41 +35,32 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);
|
||||
|
||||
DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
|
||||
|
||||
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
|
||||
static int kvm_xen_shared_info_init(struct kvm *kvm)
|
||||
{
|
||||
struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
|
||||
struct pvclock_wall_clock *wc;
|
||||
gpa_t gpa = gfn_to_gpa(gfn);
|
||||
u32 *wc_sec_hi;
|
||||
u32 wc_version;
|
||||
u64 wall_nsec;
|
||||
int ret = 0;
|
||||
int idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
if (gfn == KVM_XEN_INVALID_GFN) {
|
||||
kvm_gpc_deactivate(gpc);
|
||||
goto out;
|
||||
}
|
||||
read_lock_irq(&gpc->lock);
|
||||
while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
|
||||
read_unlock_irq(&gpc->lock);
|
||||
|
||||
do {
|
||||
ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
|
||||
ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* This code mirrors kvm_write_wall_clock() except that it writes
|
||||
* directly through the pfn cache and doesn't mark the page dirty.
|
||||
*/
|
||||
wall_nsec = kvm_get_wall_clock_epoch(kvm);
|
||||
|
||||
/* It could be invalid again already, so we need to check */
|
||||
read_lock_irq(&gpc->lock);
|
||||
}
|
||||
|
||||
if (gpc->valid)
|
||||
break;
|
||||
|
||||
read_unlock_irq(&gpc->lock);
|
||||
} while (1);
|
||||
/*
|
||||
* This code mirrors kvm_write_wall_clock() except that it writes
|
||||
* directly through the pfn cache and doesn't mark the page dirty.
|
||||
*/
|
||||
wall_nsec = kvm_get_wall_clock_epoch(kvm);
|
||||
|
||||
/* Paranoia checks on the 32-bit struct layout */
|
||||
BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
|
||||
@ -158,8 +150,93 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
|
||||
static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs,
|
||||
bool linux_wa)
|
||||
{
|
||||
int64_t kernel_now, delta;
|
||||
uint64_t guest_now;
|
||||
|
||||
/*
|
||||
* The guest provides the requested timeout in absolute nanoseconds
|
||||
* of the KVM clock — as *it* sees it, based on the scaled TSC and
|
||||
* the pvclock information provided by KVM.
|
||||
*
|
||||
* The kernel doesn't support hrtimers based on CLOCK_MONOTONIC_RAW
|
||||
* so use CLOCK_MONOTONIC. In the timescales covered by timers, the
|
||||
* difference won't matter much as there is no cumulative effect.
|
||||
*
|
||||
* Calculate the time for some arbitrary point in time around "now"
|
||||
* in terms of both kvmclock and CLOCK_MONOTONIC. Calculate the
|
||||
* delta between the kvmclock "now" value and the guest's requested
|
||||
* timeout, apply the "Linux workaround" described below, and add
|
||||
* the resulting delta to the CLOCK_MONOTONIC "now" value, to get
|
||||
* the absolute CLOCK_MONOTONIC time at which the timer should
|
||||
* fire.
|
||||
*/
|
||||
if (vcpu->arch.hv_clock.version && vcpu->kvm->arch.use_master_clock &&
|
||||
static_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
|
||||
uint64_t host_tsc, guest_tsc;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_64BIT) ||
|
||||
!kvm_get_monotonic_and_clockread(&kernel_now, &host_tsc)) {
|
||||
/*
|
||||
* Don't fall back to get_kvmclock_ns() because it's
|
||||
* broken; it has a systemic error in its results
|
||||
* because it scales directly from host TSC to
|
||||
* nanoseconds, and doesn't scale first to guest TSC
|
||||
* and *then* to nanoseconds as the guest does.
|
||||
*
|
||||
* There is a small error introduced here because time
|
||||
* continues to elapse between the ktime_get() and the
|
||||
* subsequent rdtsc(). But not the systemic drift due
|
||||
* to get_kvmclock_ns().
|
||||
*/
|
||||
kernel_now = ktime_get(); /* This is CLOCK_MONOTONIC */
|
||||
host_tsc = rdtsc();
|
||||
}
|
||||
|
||||
/* Calculate the guest kvmclock as the guest would do it. */
|
||||
guest_tsc = kvm_read_l1_tsc(vcpu, host_tsc);
|
||||
guest_now = __pvclock_read_cycles(&vcpu->arch.hv_clock,
|
||||
guest_tsc);
|
||||
} else {
|
||||
/*
|
||||
* Without CONSTANT_TSC, get_kvmclock_ns() is the only option.
|
||||
*
|
||||
* Also if the guest PV clock hasn't been set up yet, as is
|
||||
* likely to be the case during migration when the vCPU has
|
||||
* not been run yet. It would be possible to calculate the
|
||||
* scaling factors properly in that case but there's not much
|
||||
* point in doing so. The get_kvmclock_ns() drift accumulates
|
||||
* over time, so it's OK to use it at startup. Besides, on
|
||||
* migration there's going to be a little bit of skew in the
|
||||
* precise moment at which timers fire anyway. Often they'll
|
||||
* be in the "past" by the time the VM is running again after
|
||||
* migration.
|
||||
*/
|
||||
guest_now = get_kvmclock_ns(vcpu->kvm);
|
||||
kernel_now = ktime_get();
|
||||
}
|
||||
|
||||
delta = guest_abs - guest_now;
|
||||
|
||||
/*
|
||||
* Xen has a 'Linux workaround' in do_set_timer_op() which checks for
|
||||
* negative absolute timeout values (caused by integer overflow), and
|
||||
* for values about 13 days in the future (2^50ns) which would be
|
||||
* caused by jiffies overflow. For those cases, Xen sets the timeout
|
||||
* 100ms in the future (not *too* soon, since if a guest really did
|
||||
* set a long timeout on purpose we don't want to keep churning CPU
|
||||
* time by waking it up). Emulate Xen's workaround when starting the
|
||||
* timer in response to __HYPERVISOR_set_timer_op.
|
||||
*/
|
||||
if (linux_wa &&
|
||||
unlikely((int64_t)guest_abs < 0 ||
|
||||
(delta > 0 && (uint32_t) (delta >> 50) != 0))) {
|
||||
delta = 100 * NSEC_PER_MSEC;
|
||||
guest_abs = guest_now + delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* Avoid races with the old timer firing. Checking timer_expires
|
||||
* to avoid calling hrtimer_cancel() will only have false positives
|
||||
@ -171,14 +248,12 @@ static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_
|
||||
atomic_set(&vcpu->arch.xen.timer_pending, 0);
|
||||
vcpu->arch.xen.timer_expires = guest_abs;
|
||||
|
||||
if (delta_ns <= 0) {
|
||||
if (delta <= 0)
|
||||
xen_timer_callback(&vcpu->arch.xen.timer);
|
||||
} else {
|
||||
ktime_t ktime_now = ktime_get();
|
||||
else
|
||||
hrtimer_start(&vcpu->arch.xen.timer,
|
||||
ktime_add_ns(ktime_now, delta_ns),
|
||||
ktime_add_ns(kernel_now, delta),
|
||||
HRTIMER_MODE_ABS_HARD);
|
||||
}
|
||||
}
|
||||
|
||||
static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
|
||||
@ -452,14 +527,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
if (user_len2)
|
||||
if (user_len2) {
|
||||
kvm_gpc_mark_dirty_in_slot(gpc2);
|
||||
read_unlock(&gpc2->lock);
|
||||
}
|
||||
|
||||
kvm_gpc_mark_dirty_in_slot(gpc1);
|
||||
read_unlock_irqrestore(&gpc1->lock, flags);
|
||||
|
||||
mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
|
||||
if (user_len2)
|
||||
mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
|
||||
@ -493,10 +567,9 @@ void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
|
||||
kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
|
||||
}
|
||||
|
||||
static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
|
||||
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
|
||||
{
|
||||
struct kvm_lapic_irq irq = { };
|
||||
int r;
|
||||
|
||||
irq.dest_id = v->vcpu_id;
|
||||
irq.vector = v->arch.xen.upcall_vector;
|
||||
@ -505,8 +578,7 @@ static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
|
||||
irq.delivery_mode = APIC_DM_FIXED;
|
||||
irq.level = 1;
|
||||
|
||||
/* The fast version will always work for physical unicast */
|
||||
WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
|
||||
kvm_irq_delivery_to_apic(v->kvm, NULL, &irq, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -565,13 +637,13 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
|
||||
: "0" (evtchn_pending_sel32));
|
||||
WRITE_ONCE(vi->evtchn_upcall_pending, 1);
|
||||
}
|
||||
|
||||
kvm_gpc_mark_dirty_in_slot(gpc);
|
||||
read_unlock_irqrestore(&gpc->lock, flags);
|
||||
|
||||
/* For the per-vCPU lapic vector, deliver it as MSI. */
|
||||
if (v->arch.xen.upcall_vector)
|
||||
kvm_xen_inject_vcpu_vector(v);
|
||||
|
||||
mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
|
||||
@ -635,17 +707,59 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
|
||||
} else {
|
||||
mutex_lock(&kvm->arch.xen.xen_lock);
|
||||
kvm->arch.xen.long_mode = !!data->u.long_mode;
|
||||
|
||||
/*
|
||||
* Re-initialize shared_info to put the wallclock in the
|
||||
* correct place. Whilst it's not necessary to do this
|
||||
* unless the mode is actually changed, it does no harm
|
||||
* to make the call anyway.
|
||||
*/
|
||||
r = kvm->arch.xen.shinfo_cache.active ?
|
||||
kvm_xen_shared_info_init(kvm) : 0;
|
||||
mutex_unlock(&kvm->arch.xen.xen_lock);
|
||||
r = 0;
|
||||
}
|
||||
break;
|
||||
|
||||
case KVM_XEN_ATTR_TYPE_SHARED_INFO:
|
||||
case KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA: {
|
||||
int idx;
|
||||
|
||||
mutex_lock(&kvm->arch.xen.xen_lock);
|
||||
r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
|
||||
|
||||
idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
if (data->type == KVM_XEN_ATTR_TYPE_SHARED_INFO) {
|
||||
gfn_t gfn = data->u.shared_info.gfn;
|
||||
|
||||
if (gfn == KVM_XEN_INVALID_GFN) {
|
||||
kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
|
||||
r = 0;
|
||||
} else {
|
||||
r = kvm_gpc_activate(&kvm->arch.xen.shinfo_cache,
|
||||
gfn_to_gpa(gfn), PAGE_SIZE);
|
||||
}
|
||||
} else {
|
||||
void __user * hva = u64_to_user_ptr(data->u.shared_info.hva);
|
||||
|
||||
if (!PAGE_ALIGNED(hva) || !access_ok(hva, PAGE_SIZE)) {
|
||||
r = -EINVAL;
|
||||
} else if (!hva) {
|
||||
kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
|
||||
r = 0;
|
||||
} else {
|
||||
r = kvm_gpc_activate_hva(&kvm->arch.xen.shinfo_cache,
|
||||
(unsigned long)hva, PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
|
||||
if (!r && kvm->arch.xen.shinfo_cache.active)
|
||||
r = kvm_xen_shared_info_init(kvm);
|
||||
|
||||
mutex_unlock(&kvm->arch.xen.xen_lock);
|
||||
break;
|
||||
|
||||
}
|
||||
case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
|
||||
if (data->u.vector && data->u.vector < 0x10)
|
||||
r = -EINVAL;
|
||||
@ -699,13 +813,21 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
|
||||
break;
|
||||
|
||||
case KVM_XEN_ATTR_TYPE_SHARED_INFO:
|
||||
if (kvm->arch.xen.shinfo_cache.active)
|
||||
if (kvm_gpc_is_gpa_active(&kvm->arch.xen.shinfo_cache))
|
||||
data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
|
||||
else
|
||||
data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
|
||||
r = 0;
|
||||
break;
|
||||
|
||||
case KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA:
|
||||
if (kvm_gpc_is_hva_active(&kvm->arch.xen.shinfo_cache))
|
||||
data->u.shared_info.hva = kvm->arch.xen.shinfo_cache.uhva;
|
||||
else
|
||||
data->u.shared_info.hva = 0;
|
||||
r = 0;
|
||||
break;
|
||||
|
||||
case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
|
||||
data->u.vector = kvm->arch.xen.upcall_vector;
|
||||
r = 0;
|
||||
@ -742,20 +864,33 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
|
||||
|
||||
switch (data->type) {
|
||||
case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
|
||||
case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA:
|
||||
/* No compat necessary here. */
|
||||
BUILD_BUG_ON(sizeof(struct vcpu_info) !=
|
||||
sizeof(struct compat_vcpu_info));
|
||||
BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
|
||||
offsetof(struct compat_vcpu_info, time));
|
||||
|
||||
if (data->u.gpa == KVM_XEN_INVALID_GPA) {
|
||||
kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
|
||||
r = 0;
|
||||
break;
|
||||
if (data->type == KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO) {
|
||||
if (data->u.gpa == KVM_XEN_INVALID_GPA) {
|
||||
kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
|
||||
r = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
|
||||
data->u.gpa, sizeof(struct vcpu_info));
|
||||
} else {
|
||||
if (data->u.hva == 0) {
|
||||
kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
|
||||
r = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
r = kvm_gpc_activate_hva(&vcpu->arch.xen.vcpu_info_cache,
|
||||
data->u.hva, sizeof(struct vcpu_info));
|
||||
}
|
||||
|
||||
r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
|
||||
data->u.gpa, sizeof(struct vcpu_info));
|
||||
if (!r)
|
||||
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
|
||||
|
||||
@ -944,9 +1079,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
|
||||
|
||||
/* Start the timer if the new value has a valid vector+expiry. */
|
||||
if (data->u.timer.port && data->u.timer.expires_ns)
|
||||
kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
|
||||
data->u.timer.expires_ns -
|
||||
get_kvmclock_ns(vcpu->kvm));
|
||||
kvm_xen_start_timer(vcpu, data->u.timer.expires_ns, false);
|
||||
|
||||
r = 0;
|
||||
break;
|
||||
@ -977,13 +1110,21 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
|
||||
|
||||
switch (data->type) {
|
||||
case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
|
||||
if (vcpu->arch.xen.vcpu_info_cache.active)
|
||||
if (kvm_gpc_is_gpa_active(&vcpu->arch.xen.vcpu_info_cache))
|
||||
data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
|
||||
else
|
||||
data->u.gpa = KVM_XEN_INVALID_GPA;
|
||||
r = 0;
|
||||
break;
|
||||
|
||||
case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA:
|
||||
if (kvm_gpc_is_hva_active(&vcpu->arch.xen.vcpu_info_cache))
|
||||
data->u.hva = vcpu->arch.xen.vcpu_info_cache.uhva;
|
||||
else
|
||||
data->u.hva = 0;
|
||||
r = 0;
|
||||
break;
|
||||
|
||||
case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
|
||||
if (vcpu->arch.xen.vcpu_time_info_cache.active)
|
||||
data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
|
||||
@ -1093,9 +1234,24 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
|
||||
u32 page_num = data & ~PAGE_MASK;
|
||||
u64 page_addr = data & PAGE_MASK;
|
||||
bool lm = is_long_mode(vcpu);
|
||||
int r = 0;
|
||||
|
||||
/* Latch long_mode for shared_info pages etc. */
|
||||
vcpu->kvm->arch.xen.long_mode = lm;
|
||||
mutex_lock(&kvm->arch.xen.xen_lock);
|
||||
if (kvm->arch.xen.long_mode != lm) {
|
||||
kvm->arch.xen.long_mode = lm;
|
||||
|
||||
/*
|
||||
* Re-initialize shared_info to put the wallclock in the
|
||||
* correct place.
|
||||
*/
|
||||
if (kvm->arch.xen.shinfo_cache.active &&
|
||||
kvm_xen_shared_info_init(kvm))
|
||||
r = 1;
|
||||
}
|
||||
mutex_unlock(&kvm->arch.xen.xen_lock);
|
||||
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/*
|
||||
* If Xen hypercall intercept is enabled, fill the hypercall
|
||||
@ -1396,7 +1552,6 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
|
||||
{
|
||||
struct vcpu_set_singleshot_timer oneshot;
|
||||
struct x86_exception e;
|
||||
s64 delta;
|
||||
|
||||
if (!kvm_xen_timer_enabled(vcpu))
|
||||
return false;
|
||||
@ -1430,9 +1585,7 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
|
||||
return true;
|
||||
}
|
||||
|
||||
/* A delta <= 0 results in an immediate callback, which is what we want */
|
||||
delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
|
||||
kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
|
||||
kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, false);
|
||||
*r = 0;
|
||||
return true;
|
||||
|
||||
@ -1455,29 +1608,10 @@ static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
|
||||
if (!kvm_xen_timer_enabled(vcpu))
|
||||
return false;
|
||||
|
||||
if (timeout) {
|
||||
uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
|
||||
int64_t delta = timeout - guest_now;
|
||||
|
||||
/* Xen has a 'Linux workaround' in do_set_timer_op() which
|
||||
* checks for negative absolute timeout values (caused by
|
||||
* integer overflow), and for values about 13 days in the
|
||||
* future (2^50ns) which would be caused by jiffies
|
||||
* overflow. For those cases, it sets the timeout 100ms in
|
||||
* the future (not *too* soon, since if a guest really did
|
||||
* set a long timeout on purpose we don't want to keep
|
||||
* churning CPU time by waking it up).
|
||||
*/
|
||||
if (unlikely((int64_t)timeout < 0 ||
|
||||
(delta > 0 && (uint32_t) (delta >> 50) != 0))) {
|
||||
delta = 100 * NSEC_PER_MSEC;
|
||||
timeout = guest_now + delta;
|
||||
}
|
||||
|
||||
kvm_xen_start_timer(vcpu, timeout, delta);
|
||||
} else {
|
||||
if (timeout)
|
||||
kvm_xen_start_timer(vcpu, timeout, true);
|
||||
else
|
||||
kvm_xen_stop_timer(vcpu);
|
||||
}
|
||||
|
||||
*r = 0;
|
||||
return true;
|
||||
@ -1621,9 +1755,6 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
|
||||
WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
|
||||
}
|
||||
|
||||
if (!vcpu->arch.xen.vcpu_info_cache.active)
|
||||
return -EINVAL;
|
||||
|
||||
if (xe->port >= max_evtchn_port(kvm))
|
||||
return -EINVAL;
|
||||
|
||||
@ -1731,8 +1862,6 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
|
||||
mm_borrowed = true;
|
||||
}
|
||||
|
||||
mutex_lock(&kvm->arch.xen.xen_lock);
|
||||
|
||||
/*
|
||||
* It is theoretically possible for the page to be unmapped
|
||||
* and the MMU notifier to invalidate the shared_info before
|
||||
@ -1760,8 +1889,6 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
} while(!rc);
|
||||
|
||||
mutex_unlock(&kvm->arch.xen.xen_lock);
|
||||
|
||||
if (mm_borrowed)
|
||||
kthread_unuse_mm(kvm->mm);
|
||||
|
||||
@ -2109,14 +2236,10 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
|
||||
|
||||
timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
|
||||
|
||||
kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
|
||||
KVM_HOST_USES_PFN);
|
||||
kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
|
||||
KVM_HOST_USES_PFN);
|
||||
kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
|
||||
KVM_HOST_USES_PFN);
|
||||
kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
|
||||
KVM_HOST_USES_PFN);
|
||||
kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm);
|
||||
kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm);
|
||||
kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm);
|
||||
kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm);
|
||||
}
|
||||
|
||||
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
|
||||
@ -2159,7 +2282,7 @@ void kvm_xen_init_vm(struct kvm *kvm)
|
||||
{
|
||||
mutex_init(&kvm->arch.xen.xen_lock);
|
||||
idr_init(&kvm->arch.xen.evtchn_ports);
|
||||
kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
|
||||
kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm);
|
||||
}
|
||||
|
||||
void kvm_xen_destroy_vm(struct kvm *kvm)
|
||||
|
@@ -18,6 +18,7 @@ extern struct static_key_false_deferred kvm_xen_enabled;
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
 void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
+void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
 int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
 int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);

@@ -36,6 +37,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
 			 const struct kvm_irq_routing_entry *ue);
 void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);
 
+static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * The local APIC is being enabled. If the per-vCPU upcall vector is
+	 * set and the vCPU's evtchn_upcall_pending flag is set, inject the
+	 * interrupt.
+	 */
+	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
+	    vcpu->arch.xen.vcpu_info_cache.active &&
+	    vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
+		kvm_xen_inject_vcpu_vector(vcpu);
+}
+
 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
 {
 	return static_branch_unlikely(&kvm_xen_enabled.key) &&

@@ -101,6 +115,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 {
 }
 
+static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
+{
+}
+
 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
 {
 	return false;
@@ -148,6 +148,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
 
 #endif
 
+static inline bool kvm_is_error_gpa(gpa_t gpa)
+{
+	return gpa == INVALID_GPA;
+}
+
 #define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
 
 static inline bool is_error_page(struct page *page)

@@ -1318,21 +1323,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  *
  * @gpc:   struct gfn_to_pfn_cache object.
  * @kvm:   pointer to kvm instance.
- * @vcpu:  vCPU to be used for marking pages dirty and to be woken on
- *         invalidation.
- * @usage: indicates if the resulting host physical PFN is used while
- *         the @vcpu is IN_GUEST_MODE (in which case invalidation of
- *         the cache from MMU notifiers---but not for KVM memslot
- *         changes!---will also force @vcpu to exit the guest and
- *         refresh the cache); and/or if the PFN used directly
- *         by KVM (and thus needs a kernel virtual mapping).
  *
  * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
  * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
  * the caller before init).
  */
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
-		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
 
 /**
  * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest

@@ -1352,6 +1348,22 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
  */
 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
 
+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc:    struct gfn_to_pfn_cache object.
+ * @hva:    userspace virtual address to map.
+ * @len:    sanity check; the range being accessed must fit a single page.
+ *
+ * @return: 0 for success.
+ *          -EINVAL for a mapping which would cross a page boundary.
+ *          -EFAULT for an untranslatable guest physical address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
 /**
  * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
  *

@@ -1398,6 +1410,16 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
  */
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
 
+static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
+{
+	return gpc->active && !kvm_is_error_gpa(gpc->gpa);
+}
+
+static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
+{
+	return gpc->active && kvm_is_error_gpa(gpc->gpa);
+}
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 

@@ -1788,11 +1810,21 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
 	return (hpa_t)pfn << PAGE_SHIFT;
 }
 
-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
 {
 	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
 
-	return kvm_is_error_hva(hva);
+	return !kvm_is_error_hva(hva);
 }
 
+static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
+{
+	lockdep_assert_held(&gpc->lock);
+
+	if (!gpc->memslot)
+		return;
+
+	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
+}
+
 enum kvm_stat_kind {
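Taken together, the reworked cache API above can be used roughly as follows. This is a minimal in-kernel sketch following the check/refresh pattern the Xen code in this series uses; the kvm, gpc, uhva and len parameters are placeholders, and kvm_gpc_init() is shown inline only for brevity (real callers initialize the cache once at VM/vCPU creation).

    /* Minimal usage sketch of the post-series gfn_to_pfn_cache API. */
    static int gpc_write_sketch(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
    			    unsigned long uhva, unsigned long len, u8 val)
    {
    	int ret;

    	kvm_gpc_init(gpc, kvm);			/* no vcpu/usage arguments any more */

    	ret = kvm_gpc_activate_hva(gpc, uhva, len);	/* bypass the gfn stage */
    	if (ret)
    		return ret;

    	read_lock_irq(&gpc->lock);
    	while (!kvm_gpc_check(gpc, len)) {
    		read_unlock_irq(&gpc->lock);

    		ret = kvm_gpc_refresh(gpc, len);
    		if (ret) {
    			kvm_gpc_deactivate(gpc);
    			return ret;
    		}

    		/* It could be invalid again already, so check under the lock. */
    		read_lock_irq(&gpc->lock);
    	}

    	*(u8 *)gpc->khva = val;
    	kvm_gpc_mark_dirty_in_slot(gpc);	/* no-op for HVA-only caches */
    	read_unlock_irq(&gpc->lock);

    	return 0;
    }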
@@ -49,12 +49,6 @@ typedef u64 hfn_t;
 
 typedef hfn_t kvm_pfn_t;
 
-enum pfn_cache_usage {
-	KVM_GUEST_USES_PFN = BIT(0),
-	KVM_HOST_USES_PFN = BIT(1),
-	KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
-};
-
 struct gfn_to_hva_cache {
 	u64 generation;
 	gpa_t gpa;

@@ -69,13 +63,11 @@ struct gfn_to_pfn_cache {
 	unsigned long uhva;
 	struct kvm_memory_slot *memslot;
 	struct kvm *kvm;
-	struct kvm_vcpu *vcpu;
 	struct list_head list;
 	rwlock_t lock;
 	struct mutex refresh_lock;
 	void *khva;
 	kvm_pfn_t pfn;
-	enum pfn_cache_usage usage;
 	bool active;
 	bool valid;
 };
@ -62,6 +62,7 @@ enum {
|
||||
TEST_POLL_TIMEOUT,
|
||||
TEST_POLL_MASKED,
|
||||
TEST_POLL_WAKE,
|
||||
SET_VCPU_INFO,
|
||||
TEST_TIMER_PAST,
|
||||
TEST_LOCKING_SEND_RACE,
|
||||
TEST_LOCKING_POLL_RACE,
|
||||
@ -321,6 +322,10 @@ static void guest_code(void)
|
||||
|
||||
GUEST_SYNC(TEST_POLL_WAKE);
|
||||
|
||||
/* Set the vcpu_info to point at exactly the place it already is to
|
||||
* make sure the attribute is functional. */
|
||||
GUEST_SYNC(SET_VCPU_INFO);
|
||||
|
||||
/* A timer wake an *unmasked* port which should wake us with an
|
||||
* actual interrupt, while we're polling on a different port. */
|
||||
ports[0]++;
|
||||
@ -389,6 +394,7 @@ static int cmp_timespec(struct timespec *a, struct timespec *b)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shared_info *shinfo;
|
||||
static struct vcpu_info *vinfo;
|
||||
static struct kvm_vcpu *vcpu;
|
||||
|
||||
@ -404,20 +410,38 @@ static void *juggle_shinfo_state(void *arg)
|
||||
{
|
||||
struct kvm_vm *vm = (struct kvm_vm *)arg;
|
||||
|
||||
struct kvm_xen_hvm_attr cache_activate = {
|
||||
struct kvm_xen_hvm_attr cache_activate_gfn = {
|
||||
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
|
||||
.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
|
||||
};
|
||||
|
||||
struct kvm_xen_hvm_attr cache_deactivate = {
|
||||
struct kvm_xen_hvm_attr cache_deactivate_gfn = {
|
||||
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
|
||||
.u.shared_info.gfn = KVM_XEN_INVALID_GFN
|
||||
};
|
||||
|
||||
struct kvm_xen_hvm_attr cache_activate_hva = {
|
||||
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
|
||||
.u.shared_info.hva = (unsigned long)shinfo
|
||||
};
|
||||
|
||||
struct kvm_xen_hvm_attr cache_deactivate_hva = {
|
||||
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
|
||||
.u.shared_info.hva = 0
|
||||
};
|
||||
|
||||
int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
|
||||
|
||||
for (;;) {
|
||||
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate);
|
||||
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate);
|
||||
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn);
|
||||
pthread_testcancel();
|
||||
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn);
|
||||
|
||||
if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
|
||||
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva);
|
||||
pthread_testcancel();
|
||||
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva);
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
@ -442,6 +466,7 @@ int main(int argc, char *argv[])
|
||||
bool do_runstate_flag = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG);
|
||||
bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
|
||||
bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
|
||||
bool has_shinfo_hva = !!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA);
|
||||
|
||||
clock_gettime(CLOCK_REALTIME, &min_ts);
|
||||
|
||||
@ -452,7 +477,7 @@ int main(int argc, char *argv[])
|
||||
SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0);
|
||||
virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3);
|
||||
|
||||
struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
|
||||
shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
|
||||
|
||||
int zero_fd = open("/dev/zero", O_RDONLY);
|
||||
TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");
|
||||
@ -488,10 +513,16 @@ int main(int argc, char *argv[])
|
||||
"Failed to read back RUNSTATE_UPDATE_FLAG attr");
|
||||
}
|
||||
|
||||
struct kvm_xen_hvm_attr ha = {
|
||||
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
|
||||
.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE,
|
||||
};
|
||||
struct kvm_xen_hvm_attr ha = {};
|
||||
|
||||
if (has_shinfo_hva) {
|
||||
ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
|
||||
ha.u.shared_info.hva = (unsigned long)shinfo;
|
||||
} else {
|
||||
ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
|
||||
ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
|
||||
}
|
||||
|
||||
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
|
||||
|
||||
/*
|
||||
@ -862,6 +893,16 @@ int main(int argc, char *argv[])
|
||||
alarm(1);
|
||||
break;
|
||||
|
||||
case SET_VCPU_INFO:
|
||||
if (has_shinfo_hva) {
|
||||
struct kvm_xen_vcpu_attr vih = {
|
||||
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA,
|
||||
.u.hva = (unsigned long)vinfo
|
||||
};
|
||||
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vih);
|
||||
}
|
||||
break;
|
||||
|
||||
case TEST_TIMER_PAST:
|
||||
TEST_ASSERT(!evtchn_irq_expected,
|
||||
"Expected event channel IRQ but it didn't happen");
|
||||
|
@ -25,55 +25,36 @@
|
||||
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
|
||||
unsigned long end, bool may_block)
|
||||
{
|
||||
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
|
||||
struct gfn_to_pfn_cache *gpc;
|
||||
bool evict_vcpus = false;
|
||||
|
||||
spin_lock(&kvm->gpc_lock);
|
||||
list_for_each_entry(gpc, &kvm->gpc_list, list) {
|
||||
write_lock_irq(&gpc->lock);
|
||||
read_lock_irq(&gpc->lock);
|
||||
|
||||
/* Only a single page so no need to care about length */
|
||||
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
|
||||
gpc->uhva >= start && gpc->uhva < end) {
|
||||
gpc->valid = false;
|
||||
read_unlock_irq(&gpc->lock);
|
||||
|
||||
/*
|
||||
* If a guest vCPU could be using the physical address,
|
||||
* it needs to be forced out of guest mode.
|
||||
* There is a small window here where the cache could
|
||||
* be modified, and invalidation would no longer be
|
||||
* necessary. Hence check again whether invalidation
|
||||
* is still necessary once the write lock has been
|
||||
* acquired.
|
||||
*/
|
||||
if (gpc->usage & KVM_GUEST_USES_PFN) {
|
||||
if (!evict_vcpus) {
|
||||
evict_vcpus = true;
|
||||
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
|
||||
}
|
||||
__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
|
||||
}
|
||||
|
||||
write_lock_irq(&gpc->lock);
|
||||
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
|
||||
gpc->uhva >= start && gpc->uhva < end)
|
||||
gpc->valid = false;
|
||||
write_unlock_irq(&gpc->lock);
|
||||
continue;
|
||||
}
|
||||
write_unlock_irq(&gpc->lock);
|
||||
|
||||
read_unlock_irq(&gpc->lock);
|
||||
}
|
||||
spin_unlock(&kvm->gpc_lock);
|
||||
|
||||
if (evict_vcpus) {
|
||||
/*
|
||||
* KVM needs to ensure the vCPU is fully out of guest context
|
||||
* before allowing the invalidation to continue.
|
||||
*/
|
||||
unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
|
||||
bool called;
|
||||
|
||||
/*
|
||||
* If the OOM reaper is active, then all vCPUs should have
|
||||
* been stopped already, so perform the request without
|
||||
* KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
|
||||
*/
|
||||
if (!may_block)
|
||||
req &= ~KVM_REQUEST_WAIT;
|
||||
|
||||
called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
|
||||
|
||||
WARN_ON_ONCE(called && !may_block);
|
||||
}
|
||||
}
|
||||
|
||||
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
|
||||
@ -83,10 +64,17 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
|
||||
if (!gpc->active)
|
||||
return false;
|
||||
|
||||
if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
|
||||
/*
|
||||
* If the page was cached from a memslot, make sure the memslots have
|
||||
* not been re-configured.
|
||||
*/
|
||||
if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
|
||||
return false;
|
||||
|
||||
if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
|
||||
if (kvm_is_error_hva(gpc->uhva))
|
||||
return false;
|
||||
|
||||
if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
|
||||
return false;
|
||||
|
||||
if (!gpc->valid)
|
||||
@ -94,19 +82,33 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
|
||||
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_gpc_check);
|
||||
|
||||
static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
|
||||
static void *gpc_map(kvm_pfn_t pfn)
|
||||
{
|
||||
if (pfn_valid(pfn))
|
||||
return kmap(pfn_to_page(pfn));
|
||||
|
||||
#ifdef CONFIG_HAS_IOMEM
|
||||
return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
|
||||
#else
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void gpc_unmap(kvm_pfn_t pfn, void *khva)
|
||||
{
|
||||
/* Unmap the old pfn/page if it was mapped before. */
|
||||
if (!is_error_noslot_pfn(pfn) && khva) {
|
||||
if (pfn_valid(pfn))
|
||||
kunmap(pfn_to_page(pfn));
|
||||
#ifdef CONFIG_HAS_IOMEM
|
||||
else
|
||||
memunmap(khva);
|
||||
#endif
|
||||
if (is_error_noslot_pfn(pfn) || !khva)
|
||||
return;
|
||||
|
||||
if (pfn_valid(pfn)) {
|
||||
kunmap(pfn_to_page(pfn));
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAS_IOMEM
|
||||
memunmap(khva);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
|
||||
@ -140,7 +142,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
|
||||
static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
|
||||
{
|
||||
/* Note, the new page offset may be different than the old! */
|
||||
void *old_khva = gpc->khva - offset_in_page(gpc->khva);
|
||||
void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
|
||||
kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
|
||||
void *new_khva = NULL;
|
||||
unsigned long mmu_seq;
|
||||
@ -175,7 +177,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
|
||||
* the existing mapping and didn't create a new one.
|
||||
*/
|
||||
if (new_khva != old_khva)
|
||||
gpc_unmap_khva(new_pfn, new_khva);
|
||||
gpc_unmap(new_pfn, new_khva);
|
||||
|
||||
kvm_release_pfn_clean(new_pfn);
|
||||
|
||||
@@ -192,20 +194,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 		 * pfn. Note, kmap() and memremap() can both sleep, so this
 		 * too must be done outside of gpc->lock!
 		 */
-		if (gpc->usage & KVM_HOST_USES_PFN) {
-			if (new_pfn == gpc->pfn) {
-				new_khva = old_khva;
-			} else if (pfn_valid(new_pfn)) {
-				new_khva = kmap(pfn_to_page(new_pfn));
-#ifdef CONFIG_HAS_IOMEM
-			} else {
-				new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
-#endif
-			}
-			if (!new_khva) {
-				kvm_release_pfn_clean(new_pfn);
-				goto out_error;
-			}
-		}
+		if (new_pfn == gpc->pfn)
+			new_khva = old_khva;
+		else
+			new_khva = gpc_map(new_pfn);
+
+		if (!new_khva) {
+			kvm_release_pfn_clean(new_pfn);
+			goto out_error;
+		}
 
 		write_lock_irq(&gpc->lock);
@@ -219,7 +215,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 
 	gpc->valid = true;
 	gpc->pfn = new_pfn;
-	gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
+	gpc->khva = new_khva + offset_in_page(gpc->uhva);
 
 	/*
 	 * Put the reference to the _new_ pfn. The pfn is now tracked by the
@@ -236,30 +232,31 @@ out_error:
 	return -EFAULT;
 }
 
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
 			     unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
-	unsigned long page_offset = gpa & ~PAGE_MASK;
+	unsigned long page_offset;
 	bool unmap_old = false;
 	unsigned long old_uhva;
 	kvm_pfn_t old_pfn;
+	bool hva_change = false;
 	void *old_khva;
 	int ret;
 
+	/* Either gpa or uhva must be valid, but not both */
+	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
+		return -EINVAL;
+
 	/*
-	 * If must fit within a single page. The 'len' argument is
-	 * only to enforce that.
+	 * The cached access must fit within a single page. The 'len' argument
+	 * exists only to enforce that.
 	 */
+	page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+					      offset_in_page(gpa);
 	if (page_offset + len > PAGE_SIZE)
 		return -EINVAL;
 
-	/*
-	 * If another task is refreshing the cache, wait for it to complete.
-	 * There is no guarantee that concurrent refreshes will see the same
-	 * gpa, memslots generation, etc..., so they must be fully serialized.
-	 */
-	mutex_lock(&gpc->refresh_lock);
+	lockdep_assert_held(&gpc->refresh_lock);
 
 	write_lock_irq(&gpc->lock);
 
@@ -269,30 +266,52 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	}
 
 	old_pfn = gpc->pfn;
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_uhva = gpc->uhva;
-
-	/* If the userspace HVA is invalid, refresh that first */
-	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-	    kvm_is_error_hva(gpc->uhva)) {
-		gfn_t gfn = gpa_to_gfn(gpa);
-
-		gpc->gpa = gpa;
-		gpc->generation = slots->generation;
-		gpc->memslot = __gfn_to_memslot(slots, gfn);
-		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
+	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
 
-		if (kvm_is_error_hva(gpc->uhva)) {
-			ret = -EFAULT;
-			goto out;
+	if (kvm_is_error_gpa(gpa)) {
+		gpc->gpa = INVALID_GPA;
+		gpc->memslot = NULL;
+		gpc->uhva = PAGE_ALIGN_DOWN(uhva);
+
+		if (gpc->uhva != old_uhva)
+			hva_change = true;
+	} else {
+		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+
+		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+		    kvm_is_error_hva(gpc->uhva)) {
+			gfn_t gfn = gpa_to_gfn(gpa);
+
+			gpc->gpa = gpa;
+			gpc->generation = slots->generation;
+			gpc->memslot = __gfn_to_memslot(slots, gfn);
+			gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+
+			if (kvm_is_error_hva(gpc->uhva)) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			/*
+			 * Even if the GPA and/or the memslot generation changed, the
+			 * HVA may still be the same.
+			 */
+			if (gpc->uhva != old_uhva)
+				hva_change = true;
+		} else {
+			gpc->uhva = old_uhva;
 		}
 	}
 
+	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
+	gpc->uhva += page_offset;
+
 	/*
 	 * If the userspace HVA changed or the PFN was already invalid,
 	 * drop the lock and do the HVA to PFN lookup again.
 	 */
-	if (!gpc->valid || old_uhva != gpc->uhva) {
+	if (!gpc->valid || hva_change) {
 		ret = hva_to_pfn_retry(gpc);
 	} else {
 		/*
@@ -323,41 +342,47 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 out_unlock:
 	write_unlock_irq(&gpc->lock);
 
-	mutex_unlock(&gpc->refresh_lock);
-
 	if (unmap_old)
-		gpc_unmap_khva(old_pfn, old_khva);
+		gpc_unmap(old_pfn, old_khva);
 
 	return ret;
 }
 
 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+	unsigned long uhva;
+
+	guard(mutex)(&gpc->refresh_lock);
+
+	/*
+	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
+	 * or HVA-based, not both. For GPA-based caches, the HVA will be
+	 * recomputed during refresh if necessary.
+	 */
+	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
+
+	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
-		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
 {
-	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
-	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
-
 	rwlock_init(&gpc->lock);
 	mutex_init(&gpc->refresh_lock);
 
 	gpc->kvm = kvm;
-	gpc->vcpu = vcpu;
-	gpc->usage = usage;
 	gpc->pfn = KVM_PFN_ERR_FAULT;
+	gpc->gpa = INVALID_GPA;
 	gpc->uhva = KVM_HVA_ERR_BAD;
+	gpc->active = gpc->valid = false;
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_init);
 
-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
+			      unsigned long len)
 {
 	struct kvm *kvm = gpc->kvm;
 
+	guard(mutex)(&gpc->refresh_lock);
+
 	if (!gpc->active) {
 		if (KVM_BUG_ON(gpc->valid, kvm))
 			return -EIO;
@@ -375,9 +400,18 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return __kvm_gpc_refresh(gpc, gpa, len);
+	return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+}
+
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+	return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
+{
+	return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
@@ -385,6 +419,8 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 	kvm_pfn_t old_pfn;
 	void *old_khva;
 
+	guard(mutex)(&gpc->refresh_lock);
+
 	if (gpc->active) {
 		/*
 		 * Deactivate the cache before removing it from the list, KVM
@@ -412,7 +448,6 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		gpc_unmap_khva(old_pfn, old_khva);
+		gpc_unmap(old_pfn, old_khva);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
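
For context, the sketch below illustrates the calling pattern the new API is meant to support: a cache is activated by host virtual address with kvm_gpc_activate_hva(), then consumed under gpc->lock via kvm_gpc_check(), with kvm_gpc_refresh() (which now takes refresh_lock internally) re-resolving the pfn when the mapping has been invalidated. This is an illustrative sketch only, not code from the series: the function name and the page-clearing payload are invented, and it assumes 'uhva' is page-aligned so a PAGE_SIZE length is legal.

/* Illustrative sketch (not from this series): consume an HVA-based cache. */
#include <linux/kvm_host.h>

static int example_write_overlay(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				 unsigned long uhva)
{
	unsigned long flags;
	int ret;

	kvm_gpc_init(gpc, kvm);

	/* Activate by host virtual address; the gfn/memslot stage is bypassed. */
	ret = kvm_gpc_activate_hva(gpc, uhva, PAGE_SIZE);
	if (ret)
		return ret;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Remap the backing pfn if an mmu_notifier invalidated it. */
		ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
		if (ret) {
			kvm_gpc_deactivate(gpc);
			return ret;
		}

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* gpc->khva is a kernel mapping of the backing pfn while the lock is held. */
	memset(gpc->khva, 0, PAGE_SIZE);

	read_unlock_irqrestore(&gpc->lock, flags);
	return 0;
}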