Merge branch kvm-arm64/misc into kvmarm/next
* kvm-arm64/misc:
  : Miscellaneous updates
  :
  : - Convert CPACR_EL1_TTA to the new, generated system register
  :   definitions.
  :
  : - Serialize toggling CPACR_EL1.SMEN to avoid unexpected exceptions when
  :   accessing SVCR in the host.
  :
  : - Avoid quiescing the guest if a vCPU accesses its own redistributor's
  :   SGIs/PPIs, eliminating the need to IPI. Largely an optimization for
  :   nested virtualization, as the L1 accesses the affected registers
  :   rather often.
  :
  : - Conversion to kstrtobool()
  :
  : - Common definition of INVALID_GPA across architectures
  :
  : - Enable CONFIG_USERFAULTFD for CI runs of KVM selftests
  KVM: arm64: Fix non-kerneldoc comments
  KVM: selftests: Enable USERFAULTFD
  KVM: selftests: Remove redundant setbuf()
  arm64/sysreg: clean up some inconsistent indenting
  KVM: MMU: Make the definition of 'INVALID_GPA' common
  KVM: arm64: vgic-v3: Use kstrtobool() instead of strtobool()
  KVM: arm64: vgic-v3: Limit IPI-ing when accessing GICR_{C,S}ACTIVER0
  KVM: arm64: Synchronize SMEN on vcpu schedule out
  KVM: arm64: Kill CPACR_EL1_TTA definition

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 022d3f0800
@@ -347,7 +347,6 @@
 		ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
 		ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
 
-#define CPACR_EL1_TTA		(1 << 28)
 #define CPACR_EL1_DEFAULT	(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |\
 				 CPACR_EL1_ZEN_EL1EN)
 
@@ -918,12 +918,12 @@ void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
-	vcpu_arch->steal.base = GPA_INVALID;
+	vcpu_arch->steal.base = INVALID_GPA;
 }
 
 static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
 {
-	return (vcpu_arch->steal.base != GPA_INVALID);
+	return (vcpu_arch->steal.base != INVALID_GPA);
 }
 
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
@@ -1922,9 +1922,7 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
 	return 0;
 }
 
-/**
- * Inits Hyp-mode on all online CPUs
- */
+/* Inits Hyp-mode on all online CPUs */
 static int __init init_hyp_mode(void)
 {
 	u32 hyp_va_bits;
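Note: a comment opened with "/**" is treated as kernel-doc, which scripts/kernel-doc expects to document a symbol in a structured format; the banners touched in this hunk and the next are free-standing, hence the switch to plain "/*" one-liners. For contrast only, the shape a genuine kernel-doc comment would take (illustrative, not what the patch adds):

/**
 * init_hyp_mode() - Inits Hyp-mode on all online CPUs.
 *
 * Return: 0 on success, a negative error code on failure.
 */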
@@ -2200,9 +2198,7 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
 	kvm_arm_resume_guest(irqfd->kvm);
 }
 
-/**
- * Initialize Hyp-mode and memory mappings on all CPUs.
- */
+/* Initialize Hyp-mode and memory mappings on all CPUs */
 static __init int kvm_arm_init(void)
 {
 	int err;
@@ -184,6 +184,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 		sysreg_clear_set(CPACR_EL1,
 				 CPACR_EL1_SMEN_EL0EN,
 				 CPACR_EL1_SMEN_EL1EN);
+		isb();
 	}
 
 	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
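Note: system register writes are not self-synchronizing, so without the added isb() a later host access to SVCR could still trap because the CPACR_EL1.SMEN update may not yet be architecturally visible. For reference, a sketch of what sysreg_clear_set() does (assumed to mirror the arm64 helper in asm/sysreg.h; the name below is ours):

/* Sketch of sysreg_clear_set(sysreg, clear, set): a read-modify-write
 * that skips the register write when no bits actually change. */
#define sysreg_clear_set_sketch(sysreg, clear, set) do {		\
	u64 __v = read_sysreg(sysreg);					\
	u64 __n = (__v & ~(u64)(clear)) | (u64)(set);			\
	if (__n != __v)							\
		write_sysreg(__n, sysreg);				\
} while (0)

Because the write may also be skipped entirely, the explicit isb() after the call is what guarantees the new trap configuration is in effect before the next instruction.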
@@ -40,7 +40,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	___activate_traps(vcpu);
 
 	val = read_sysreg(cpacr_el1);
-	val |= CPACR_EL1_TTA;
+	val |= CPACR_ELx_TTA;
 	val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
 		 CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
 
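Note: CPACR_ELx_TTA comes from the generated system register definitions, replacing the hand-written CPACR_EL1_TTA removed in the first hunk. Both should name the same TTA (trap trace access) bit, which per the removed definition is bit 28; a sanity check along these lines (illustrative, not part of the patch) would confirm the swap is value-preserving:

/* Assumption: the generated macro encodes the same bit as the old
 * hand-rolled (1 << 28) definition being deleted. */
static_assert(CPACR_ELx_TTA == (1UL << 28));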
@@ -198,7 +198,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 		break;
 	case ARM_SMCCC_HV_PV_TIME_ST:
 		gpa = kvm_init_stolen_time(vcpu);
-		if (gpa != GPA_INVALID)
+		if (gpa != INVALID_GPA)
 			val[0] = gpa;
 		break;
 	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
@@ -19,7 +19,7 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 	u64 steal = 0;
 	int idx;
 
-	if (base == GPA_INVALID)
+	if (base == INVALID_GPA)
 		return;
 
 	idx = srcu_read_lock(&kvm->srcu);
@@ -40,7 +40,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
 	switch (feature) {
 	case ARM_SMCCC_HV_PV_TIME_FEATURES:
 	case ARM_SMCCC_HV_PV_TIME_ST:
-		if (vcpu->arch.steal.base != GPA_INVALID)
+		if (vcpu->arch.steal.base != INVALID_GPA)
 			val = SMCCC_RET_SUCCESS;
 		break;
 	}
@@ -54,7 +54,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	u64 base = vcpu->arch.steal.base;
 
-	if (base == GPA_INVALID)
+	if (base == INVALID_GPA)
 		return base;
 
 	/*
@@ -89,7 +89,7 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 		return -EFAULT;
 	if (!IS_ALIGNED(ipa, 64))
 		return -EINVAL;
-	if (vcpu->arch.steal.base != GPA_INVALID)
+	if (vcpu->arch.steal.base != INVALID_GPA)
 		return -EEXIST;
 
 	/* Check the address is in a valid memslot */
@@ -473,9 +473,10 @@ int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
  * active state can be overwritten when the VCPU's state is synced coming back
  * from the guest.
  *
- * For shared interrupts as well as GICv3 private interrupts, we have to
- * stop all the VCPUs because interrupts can be migrated while we don't hold
- * the IRQ locks and we don't want to be chasing moving targets.
+ * For shared interrupts as well as GICv3 private interrupts accessed from the
+ * non-owning CPU, we have to stop all the VCPUs because interrupts can be
+ * migrated while we don't hold the IRQ locks and we don't want to be chasing
+ * moving targets.
  *
  * For GICv2 private interrupts we don't have to do anything because
  * userspace accesses to the VGIC state already require all VCPUs to be
@@ -484,7 +485,8 @@ int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
  */
 static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
 {
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
+	     vcpu != kvm_get_running_vcpu()) ||
 	    intid >= VGIC_NR_PRIVATE_IRQS)
 		kvm_arm_halt_guest(vcpu->kvm);
 }
@@ -492,7 +494,8 @@ static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
 /* See vgic_access_active_prepare */
 static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
 {
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
+	     vcpu != kvm_get_running_vcpu()) ||
 	    intid >= VGIC_NR_PRIVATE_IRQS)
 		kvm_arm_resume_guest(vcpu->kvm);
 }
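Note: per the commit message, this stops IPI-ing every vCPU when a vCPU pokes its own redistributor's SGI/PPI active state, which L1 hypervisors do frequently under nested virt. A sketch (hypothetical helper, not part of the patch) of the decision both functions now encode:

static bool vgic_access_needs_halt(struct kvm_vcpu *vcpu, u32 intid)
{
	/* SPIs can migrate between vCPUs, so always quiesce for them. */
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		return true;

	/* GICv2 private interrupts: userspace access already requires
	 * all vCPUs to be stopped, so nothing to do here. */
	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	/* A vCPU accessing its own redistributor cannot race itself;
	 * only halt when some other CPU is doing the access. */
	return vcpu != kvm_get_running_vcpu();
}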
@@ -3,6 +3,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/kstrtox.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
@@ -587,25 +588,25 @@ DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
 
 static int __init early_group0_trap_cfg(char *buf)
 {
-	return strtobool(buf, &group0_trap);
+	return kstrtobool(buf, &group0_trap);
 }
 early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);
 
 static int __init early_group1_trap_cfg(char *buf)
 {
-	return strtobool(buf, &group1_trap);
+	return kstrtobool(buf, &group1_trap);
 }
 early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);
 
 static int __init early_common_trap_cfg(char *buf)
 {
-	return strtobool(buf, &common_trap);
+	return kstrtobool(buf, &common_trap);
 }
 early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
 
 static int __init early_gicv4_enable(char *buf)
 {
-	return strtobool(buf, &gicv4_enable);
+	return kstrtobool(buf, &gicv4_enable);
 }
 early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
 
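Note: kstrtobool() is the maintained replacement for the deprecated strtobool() alias; same signature, declared in linux/kstrtox.h. A minimal usage sketch with a hypothetical parameter name:

#include <linux/init.h>
#include <linux/kstrtox.h>

static bool my_feature_enabled;

/* kstrtobool() accepts "y"/"Y"/"1" and "on" as true, "n"/"N"/"0" and
 * "off" as false, and returns -EINVAL for anything else. */
static int __init early_my_feature_cfg(char *buf)
{
	return kstrtobool(buf, &my_feature_enabled);
}
early_param("my-driver.feature", early_my_feature_cfg);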
@@ -134,8 +134,6 @@
 #define INVALID_PAGE	(~(hpa_t)0)
 #define VALID_PAGE(x)	((x) != INVALID_PAGE)
 
-#define INVALID_GPA	(~(gpa_t)0)
-
 /* KVM Hugepage definitions for x86 */
 #define KVM_MAX_HUGEPAGE_LEVEL	PG_LEVEL_1G
 #define KVM_NR_PAGE_SIZES	(KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
@@ -40,7 +40,7 @@ typedef unsigned long gva_t;
 typedef u64 gpa_t;
 typedef u64 gfn_t;
 
-#define GPA_INVALID	(~(gpa_t)0)
+#define INVALID_GPA	(~(gpa_t)0)
 
 typedef unsigned long hva_t;
 typedef u64 hpa_t;
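Note: with the rename, the common header carries the single spelling of the "all ones" GPA sentinel; arm64's GPA_INVALID and x86's private INVALID_GPA (dropped in the previous hunk) had the same value. A sketch (hypothetical helper, not in the patch) of the sentinel pattern this enables:

#include <linux/kvm_types.h>

static inline bool gpa_is_valid(gpa_t gpa)
{
	/* An all-ones value is never a legal guest physical address. */
	return gpa != INVALID_GPA;
}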
@@ -1093,8 +1093,6 @@ int main(int argc, char *argv[])
 	enum vm_mem_backing_src_type src_type;
 	int opt;
 
-	setbuf(stdout, NULL);
-
 	src_type = DEFAULT_VM_MEM_SRC;
 
 	while ((opt = getopt(argc, argv, "hm:s:")) != -1) {
@@ -1,3 +1,4 @@
 CONFIG_KVM=y
 CONFIG_KVM_INTEL=y
 CONFIG_KVM_AMD=y
+CONFIG_USERFAULTFD=y
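Note: CONFIG_USERFAULTFD gates the userfaultfd(2) syscall, which the KVM demand paging selftests depend on. A standalone userspace sketch (illustrative only) that probes whether the running kernel supports it:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* No glibc wrapper exists; invoke the raw syscall. It fails
	 * with ENOSYS when the kernel lacks CONFIG_USERFAULTFD. */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0) {
		perror("userfaultfd");
		return 1;
	}
	printf("userfaultfd supported (fd=%d)\n", uffd);
	close(uffd);
	return 0;
}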
@@ -26,9 +26,6 @@ int main(int argc, char *argv[])
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	/* Tell stdout not to buffer its content */
-	setbuf(stdout, NULL);
-
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
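Note: the per-test setbuf() calls removed in this and the earlier selftest hunk became redundant because the selftest library now disables stdout buffering once for every test binary. The pattern is a library constructor along these lines (a sketch; the in-tree function name may differ):

#include <stdio.h>

/* Runs before main() in every binary linked against the library. */
static void __attribute__((constructor)) kvm_selftest_init(void)
{
	/* Tell stdout not to buffer its content. */
	setbuf(stdout, NULL);
}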