ARM64:

* Fix a regression with pKVM when kmemleak is enabled

* Add Oliver Upton as an official KVM/arm64 reviewer

selftests:

* deal with compiler optimizations around hypervisor exits

x86:

* MAINTAINERS reorganization

* Two SEV fixes
-----BEGIN PGP SIGNATURE-----

iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmK1cjIUHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroMayQf+JOOggLacjPPa/t/CE8kIcbX0IWc+
epEdq/f0qgxJlAjUB9YKgMr2Io9jPScyTdY8t6uS0WyZ7Q1NyAogXfds/dF4wElm
IMWWfLTSU3gzCmzPh8n6SfbWtRGJKsOukK0cDIIh86h5YnXDmeyVjJrDvEVQOnzG
TjHOKYuFXGPj8/NKwcrxqBFHK9DBNxn9b/UBRArG+5AZM0mx3Jl8LJMYUDEIyAyO
yhNfTh7gPPidEiJLkFDyHWKg5rhO3fbn8UrncY+eTmSBqMHvvY0+eka6urwihN0v
ExmKqy00ES51c/6r+zqsqYICqVSqiaNNWF4lp1HTp7LrUBtxyZAqnkBbHQ==
=U2ol
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM64:

   - Fix a regression with pKVM when kmemleak is enabled

   - Add Oliver Upton as an official KVM/arm64 reviewer

  selftests:

   - deal with compiler optimizations around hypervisor exits

  x86:

   - MAINTAINERS reorganization

   - Two SEV fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SEV: Init target VMCBs in sev_migrate_from
  KVM: x86/svm: add __GFP_ACCOUNT to __sev_dbg_{en,de}crypt_user()
  MAINTAINERS: Reorganize KVM/x86 maintainership
  selftests: KVM: Handle compiler optimizations in ucall
  KVM: arm64: Add Oliver as a reviewer
  KVM: arm64: Prevent kmemleak from accessing pKVM memory
  tools/kvm_stat: fix display of error when multiple processes are found
commit e946554905
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10846,6 +10846,7 @@ M:	Marc Zyngier <maz@kernel.org>
 R:	James Morse <james.morse@arm.com>
 R:	Alexandru Elisei <alexandru.elisei@arm.com>
 R:	Suzuki K Poulose <suzuki.poulose@arm.com>
+R:	Oliver Upton <oliver.upton@linux.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu (moderated for non-subscribers)
 S:	Maintained
@@ -10912,28 +10913,51 @@ F:	tools/testing/selftests/kvm/*/s390x/
 F:	tools/testing/selftests/kvm/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
+M:	Sean Christopherson <seanjc@google.com>
 M:	Paolo Bonzini <pbonzini@redhat.com>
-R:	Sean Christopherson <seanjc@google.com>
-R:	Vitaly Kuznetsov <vkuznets@redhat.com>
-R:	Wanpeng Li <wanpengli@tencent.com>
-R:	Jim Mattson <jmattson@google.com>
-R:	Joerg Roedel <joro@8bytes.org>
 L:	kvm@vger.kernel.org
 S:	Supported
-W:	http://www.linux-kvm.org
 T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
 F:	arch/x86/include/asm/kvm*
-F:	arch/x86/include/asm/pvclock-abi.h
 F:	arch/x86/include/asm/svm.h
 F:	arch/x86/include/asm/vmx*.h
 F:	arch/x86/include/uapi/asm/kvm*
 F:	arch/x86/include/uapi/asm/svm.h
 F:	arch/x86/include/uapi/asm/vmx.h
-F:	arch/x86/kernel/kvm.c
-F:	arch/x86/kernel/kvmclock.c
 F:	arch/x86/kvm/
 F:	arch/x86/kvm/*/
+
+KVM PARAVIRT (KVM/paravirt)
+M:	Paolo Bonzini <pbonzini@redhat.com>
+R:	Wanpeng Li <wanpengli@tencent.com>
+R:	Vitaly Kuznetsov <vkuznets@redhat.com>
+L:	kvm@vger.kernel.org
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F:	arch/x86/kernel/kvm.c
+F:	arch/x86/kernel/kvmclock.c
+F:	arch/x86/include/asm/pvclock-abi.h
+F:	include/linux/kvm_para.h
+F:	include/uapi/linux/kvm_para.h
+F:	include/uapi/asm-generic/kvm_para.h
+F:	include/asm-generic/kvm_para.h
+F:	arch/um/include/asm/kvm_para.h
+F:	arch/x86/include/asm/kvm_para.h
+F:	arch/x86/include/uapi/asm/kvm_para.h
+
+KVM X86 HYPER-V (KVM/hyper-v)
+M:	Vitaly Kuznetsov <vkuznets@redhat.com>
+M:	Sean Christopherson <seanjc@google.com>
+M:	Paolo Bonzini <pbonzini@redhat.com>
+L:	kvm@vger.kernel.org
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F:	arch/x86/kvm/hyperv.*
+F:	arch/x86/kvm/kvm_onhyperv.*
+F:	arch/x86/kvm/svm/hyperv.*
+F:	arch/x86/kvm/svm/svm_onhyperv.*
+F:	arch/x86/kvm/vmx/evmcs.*
 
 KERNFS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Tejun Heo <tj@kernel.org>
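A note on the record format, for readers who don't live in MAINTAINERS: the single-letter fields follow the scheme documented at the top of that file — M: maintainer, R: designated reviewer, L: mailing list, S: support status, W: web page, T: SCM tree, F: covered file patterns. The net effect of the hunk above is that Sean Christopherson becomes a co-maintainer of KVM/x86, the reviewer list is retired, and the paravirt and Hyper-V interfaces are split out into their own entries with their own maintainers.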
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2112,11 +2112,11 @@ static int finalize_hyp_mode(void)
 		return 0;
 
 	/*
-	 * Exclude HYP BSS from kmemleak so that it doesn't get peeked
-	 * at, which would end badly once the section is inaccessible.
-	 * None of other sections should ever be introspected.
+	 * Exclude HYP sections from kmemleak so that they don't get peeked
+	 * at, which would end badly once inaccessible.
 	 */
 	kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+	kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
 	return pkvm_drop_host_privileges();
 }
 
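Background for the hunk above: once pKVM donates its memory pool to the protected hypervisor, the host kernel loses access to those pages, so kmemleak's periodic scan would fault on them; kmemleak_free_part() removes a sub-range from the scanner before that happens. A minimal sketch of the same pattern — donate_to_firmware() and the surrounding names are hypothetical, not kernel API:

#include <linux/kmemleak.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Hypothetical donation hook; stands in for pKVM's stage-2 unmap. */
extern int donate_to_firmware(phys_addr_t base, phys_addr_t size);

static int __init donate_region(void)
{
	phys_addr_t base = memblock_phys_alloc(SZ_2M, PAGE_SIZE);

	if (!base)
		return -ENOMEM;

	/*
	 * Drop the range from kmemleak's scan list *before* it becomes
	 * inaccessible, as finalize_hyp_mode() now does for both the
	 * hyp BSS and the pKVM memory pool.
	 */
	kmemleak_free_part(__va(base), SZ_2M);

	return donate_to_firmware(base, SZ_2M);
}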
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -844,7 +844,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 
 	/* If source buffer is not aligned then use an intermediate buffer */
 	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
-		src_tpage = alloc_page(GFP_KERNEL);
+		src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
 		if (!src_tpage)
 			return -ENOMEM;
 
@@ -865,7 +865,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
 		int dst_offset;
 
-		dst_tpage = alloc_page(GFP_KERNEL);
+		dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
 		if (!dst_tpage) {
 			ret = -ENOMEM;
 			goto e_free;
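Why the one-flag change matters: GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT, which charges the allocation to the calling task's memory cgroup. The bounce pages above exist only to service a userspace debug ioctl, so without accounting a guest owner could grow unaccounted kernel memory by issuing the ioctl in a loop. The general shape, as a sketch:

#include <linux/gfp.h>

/*
 * Sketch: a scratch page allocated on behalf of a userspace request.
 * GFP_KERNEL_ACCOUNT makes the caller's memory cgroup pay for it.
 */
static struct page *alloc_scratch_page(void)
{
	return alloc_page(GFP_KERNEL_ACCOUNT);
}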
@@ -1665,19 +1665,24 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
 	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
 	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
+	struct kvm_vcpu *dst_vcpu, *src_vcpu;
+	struct vcpu_svm *dst_svm, *src_svm;
 	struct kvm_sev_info *mirror;
+	unsigned long i;
 
 	dst->active = true;
 	dst->asid = src->asid;
 	dst->handle = src->handle;
 	dst->pages_locked = src->pages_locked;
 	dst->enc_context_owner = src->enc_context_owner;
+	dst->es_active = src->es_active;
 
 	src->asid = 0;
 	src->active = false;
 	src->handle = 0;
 	src->pages_locked = 0;
 	src->enc_context_owner = NULL;
+	src->es_active = false;
 
 	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
 
@@ -1704,27 +1709,22 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 		list_del(&src->mirror_entry);
 		list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
 	}
-}
 
-static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
-{
-	unsigned long i;
-	struct kvm_vcpu *dst_vcpu, *src_vcpu;
-	struct vcpu_svm *dst_svm, *src_svm;
-
-	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
-		return -EINVAL;
-
-	kvm_for_each_vcpu(i, src_vcpu, src) {
-		if (!src_vcpu->arch.guest_state_protected)
-			return -EINVAL;
-	}
-
-	kvm_for_each_vcpu(i, src_vcpu, src) {
-		src_svm = to_svm(src_vcpu);
-		dst_vcpu = kvm_get_vcpu(dst, i);
+	kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
 		dst_svm = to_svm(dst_vcpu);
 
+		sev_init_vmcb(dst_svm);
+
+		if (!dst->es_active)
+			continue;
+
+		/*
+		 * Note, the source is not required to have the same number of
+		 * vCPUs as the destination when migrating a vanilla SEV VM.
+		 */
+		src_vcpu = kvm_get_vcpu(src_kvm, i);
+		src_svm = to_svm(src_vcpu);
+
 		/*
 		 * Transfer VMSA and GHCB state to the destination. Nullify and
 		 * clear source fields as appropriate, the state now belongs to
@@ -1740,8 +1740,23 @@ static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
 		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
 		src_vcpu->arch.guest_state_protected = false;
 	}
-	to_kvm_svm(src)->sev_info.es_active = false;
-	to_kvm_svm(dst)->sev_info.es_active = true;
+}
+
+static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
+{
+	struct kvm_vcpu *src_vcpu;
+	unsigned long i;
+
+	if (!sev_es_guest(src))
+		return 0;
+
+	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
+		return -EINVAL;
+
+	kvm_for_each_vcpu(i, src_vcpu, src) {
+		if (!src_vcpu->arch.guest_state_protected)
+			return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1789,11 +1804,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 	if (ret)
 		goto out_dst_vcpu;
 
-	if (sev_es_guest(source_kvm)) {
-		ret = sev_es_migrate_from(kvm, source_kvm);
-		if (ret)
-			goto out_source_vcpu;
-	}
+	ret = sev_check_source_vcpus(kvm, source_kvm);
+	if (ret)
+		goto out_source_vcpu;
 
 	sev_migrate_from(kvm, source_kvm);
 	kvm_vm_dead(source_kvm);
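A note on the shape of this fix: the old code interleaved validation with the migration itself, so the fallible SEV-ES path could bail out after state had started moving. The new split performs all validation up front in sev_check_source_vcpus(), leaving sev_migrate_from() as a void, cannot-fail commit step that also initializes the target VMCBs. In miniature, with every name below hypothetical:

/*
 * Check-then-commit: all fallible validation happens before any state
 * moves, so a failure can never leave both VMs half-migrated.
 */
struct vm_ctx;

static int check_migration(struct vm_ctx *dst, struct vm_ctx *src); /* read-only */
static void do_migration(struct vm_ctx *dst, struct vm_ctx *src);   /* cannot fail */

int move_enc_context(struct vm_ctx *dst, struct vm_ctx *src)
{
	int ret = check_migration(dst, src);

	if (ret)
		return ret;

	do_migration(dst, src);
	return 0;
}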
@@ -2914,7 +2927,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 				    count, in);
 }
 
-void sev_es_init_vmcb(struct vcpu_svm *svm)
+static void sev_es_init_vmcb(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 
@@ -2967,6 +2980,15 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
 	}
 }
 
+void sev_init_vmcb(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+	clr_exception_intercept(svm, UD_VECTOR);
+
+	if (sev_es_guest(svm->vcpu.kvm))
+		sev_es_init_vmcb(svm);
+}
+
 void sev_es_vcpu_reset(struct vcpu_svm *svm)
 {
 	/*
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1212,15 +1212,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
 	}
 
-	if (sev_guest(vcpu->kvm)) {
-		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
-		clr_exception_intercept(svm, UD_VECTOR);
-
-		if (sev_es_guest(vcpu->kvm)) {
-			/* Perform SEV-ES specific VMCB updates */
-			sev_es_init_vmcb(svm);
-		}
-	}
+	if (sev_guest(vcpu->kvm))
+		sev_init_vmcb(svm);
 
 	svm_hv_init_vmcb(vmcb);
 	init_vmcb_after_set_cpuid(vcpu);
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -649,10 +649,10 @@ void __init sev_set_cpu_caps(void);
 void __init sev_hardware_setup(void);
 void sev_hardware_unsetup(void);
 int sev_cpu_init(struct svm_cpu_data *sd);
+void sev_init_vmcb(struct vcpu_svm *svm);
 void sev_free_vcpu(struct kvm_vcpu *vcpu);
 int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
-void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_vcpu_reset(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
--- a/tools/kvm_stat/kvm_stat
+++ b/tools/kvm_stat/kvm_stat
@@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately.
                  .format(values))
     if len(pids) > 1:
         sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
-                 ' to specify the desired pid'.format(" ".join(pids)))
+                 ' to specify the desired pid'
+                 .format(" ".join(map(str, pids))))
     namespace.pid = pids[0]
 
 argparser = argparse.ArgumentParser(description=description_text,
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)
 
 void ucall(uint64_t cmd, int nargs, ...)
 {
-	struct ucall uc = {
-		.cmd = cmd,
-	};
+	struct ucall uc = {};
 	va_list va;
 	int i;
 
+	WRITE_ONCE(uc.cmd, cmd);
 	nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
 
 	va_start(va, nargs);
 	for (i = 0; i < nargs; ++i)
-		uc.args[i] = va_arg(va, uint64_t);
+		WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
 	va_end(va);
 
-	*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
+	WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
 }
 
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
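Why WRITE_ONCE() fixes the flake: to the compiler, uc is an ordinary local that is never read again after its address is published through the MMIO write, so an optimizer may legally delete or defer the plain stores — and the host then snapshots garbage when the MMIO access exits. Volatile accesses force every store to be emitted, in program order. A freestanding sketch of the same hazard, with a hand-rolled WRITE_ONCE() stand-in and a made-up MMIO address:

#include <stdint.h>

/* Simplified stand-in for the kernel's WRITE_ONCE(): a volatile access
 * the compiler must perform exactly as written. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

struct ucall_like {
	uint64_t cmd;
	uint64_t args[6];
};

/* Hypothetical MMIO doorbell; any access to it traps to the host. */
static uint64_t *exit_mmio_addr = (uint64_t *)0x10000000UL;

static void report_to_host(uint64_t cmd, uint64_t arg0)
{
	struct ucall_like uc;

	/*
	 * As plain assignments these could be elided, since uc looks
	 * dead after this function. As volatile stores they must land
	 * before the doorbell write below.
	 */
	WRITE_ONCE(uc.cmd, cmd);
	WRITE_ONCE(uc.args[0], arg0);

	/* Publishing &uc traps; the host reads uc while we're stopped. */
	WRITE_ONCE(*exit_mmio_addr, (uintptr_t)&uc);
}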