// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
* ARMv8 Reset Values
*/
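/*
 * On reset the vcpu starts in EL1h (AArch64) or SVC mode (AArch32) with
 * debug, SError, IRQ and FIQ exceptions masked (A, I and F only for the
 * AArch32 case).
 */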
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

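/* Maximum SVE vector length (in bytes) that KVM will expose to any guest */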
unsigned int kvm_sve_max_vl;

int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl)
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

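	/* The buffer is sized like the SVE register block of a signal frame */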
	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
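	/* SVE is currently the only feature that requires explicit finalization */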
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together and the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}

static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tmp;
	bool is32bit;
	int i;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return false;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm) && is32bit)
		return false;

	/* Check that the vcpus are either all 32bit or all 64bit */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
			return false;
	}

	return true;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int ret;
	bool loaded;
	u32 pstate;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
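	/* vcpu->cpu is -1 whenever the vcpu is not currently loaded on a CPU */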
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

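	/*
	 * Before the vcpu is finalized, requested optional features are
	 * enabled here; once SVE state has been allocated, a reset only
	 * needs to zero it.
	 */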
	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (!vcpu_allowed_register_width(vcpu)) {
		ret = -EINVAL;
		goto out;
	}

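	/*
	 * Pick the reset PSTATE: AArch32 SVC mode if the guest asked for
	 * 32bit EL1, AArch64 EL1h otherwise.
	 */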
	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		vcpu->arch.reset_state.reset = false;
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int kvm_set_ipa_limit(void)
{
	unsigned int parange, tgran_2;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (PAGE_SIZE) {
	default:
	case SZ_4K:
		tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
		break;
	case SZ_16K:
		tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
		break;
	case SZ_64K:
		tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
		break;
	}

	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

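	/*
	 * The host IPA limit is derived from the sanitised
	 * ID_AA64MMFR0_EL1.PARange, i.e. the physical address range
	 * supported by all CPUs in the system.
	 */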
	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

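	/*
	 * A non-zero IPA size requested by userspace (bits [7:0] of the VM
	 * type) must lie between 32 bits and kvm_ipa_limit; zero selects the
	 * legacy default of KVM_PHYS_SHIFT.
	 */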
	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}