// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>

#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>
/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}
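
/*
 * Worked example (illustration only, assuming the arm64 definitions
 * MPIDR_LEVEL_BITS == 8 and MPIDR_HWID_BITMASK == 0xff00ffffff):
 *
 *	psci_affinity_mask(0) == 0xff00ffffff	(Aff0..Aff3 all compared)
 *	psci_affinity_mask(1) == 0xff00ffff00	(Aff0 ignored)
 *	psci_affinity_mask(2) == 0xff00ff0000	(Aff0/Aff1 ignored)
 *	psci_affinity_mask(3) == 0xff00000000	(only Aff3 compared)
 *	psci_affinity_mask(4) == 0		(invalid level)
 */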

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we emulate VCPU suspend the same way as
	 * WFI (Wait-for-interrupt) emulation.
	 *
	 * This means that for KVM the wakeup events are interrupts, which
	 * is consistent with the intended use of StateID as described in
	 * section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
	 *
	 * Further, we also treat a power-down request the same as a
	 * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
	 * specification (ARM DEN 0022A). This means all suspend states
	 * for KVM will preserve the register state.
	 */
	kvm_vcpu_halt(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_psci_valid_affinity(struct kvm_vcpu *vcpu,
					   unsigned long affinity)
{
	return !(affinity & ~MPIDR_HWID_BITMASK);
}
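
/*
 * For example (assuming the arm64 MPIDR_HWID_BITMASK of 0xff00ffffff),
 * an affinity argument of 0x0101 (Aff1 == 1, Aff0 == 1) is accepted,
 * while 0x80000000 is rejected because bit 31 falls outside the MPIDR
 * affinity fields.
 */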

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	unsigned long cpu_id;

	cpu_id = smccc_get_arg1(source_vcpu);
	if (!kvm_psci_valid_affinity(source_vcpu, cpu_id))
		return PSCI_RET_INVALID_PARAMS;

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	reset_state = &vcpu->arch.reset_state;

	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate caller endianness */
	reset_state->be = kvm_vcpu_is_be(source_vcpu);

	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

	WRITE_ONCE(reset_state->reset, true);
	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

	/*
	 * Make sure the reset request is observed if the change to
	 * power_off is observed.
	 */
	smp_wmb();

	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);

	return PSCI_RET_SUCCESS;
}

static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int matching_cpus = 0;
	unsigned long i, mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = smccc_get_arg1(vcpu);
	lowest_affinity_level = smccc_get_arg2(vcpu);

	if (!kvm_psci_valid_affinity(vcpu, target_affinity))
		return PSCI_RET_INVALID_PARAMS;

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If at least one VCPU matching the target affinity is running,
	 * return ON; otherwise return OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!tmp->arch.power_off)
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
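
/*
 * Worked example (illustration only): a guest asking about
 * target_affinity == 0x0201 with lowest_affinity_level == 1 gets a
 * mask of 0xff00ffff00 (on arm64), so target_affinity is reduced to
 * 0x0200 and every VCPU whose MPIDR has Aff1 == 2 (and Aff2/Aff3 == 0)
 * is a match; the call returns ON as soon as one such VCPU is found
 * powered on.
 */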

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made. Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not
	 * run after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
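
/*
 * Userspace sketch (illustration only, not part of this file): a VMM
 * consuming this exit would do something like the following, assuming
 * a standard KVM_RUN loop with "run" mmap'ed from the vcpu fd:
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
 *		switch (run->system_event.type) {
 *		case KVM_SYSTEM_EVENT_SHUTDOWN:
 *			// tear the VM down
 *			break;
 *		case KVM_SYSTEM_EVENT_RESET:
 *			// re-initialize the VCPUs and restart the guest
 *			break;
 *		}
 *	}
 */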

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
{
	int i;

	/*
	 * Zero the input registers' upper 32 bits. They will be fully
	 * zeroed on exit, so we're fine changing them in place.
	 */
	for (i = 1; i < 4; i++)
		vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));
}
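
/*
 * For example (illustration only): if an AArch32 caller leaves stale
 * data in the upper half of a register, say x1 == 0xdeadbeef00000001,
 * only the low 32 bits are meaningful under the SMC32 convention, so
 * the register is narrowed to 0x1 before the PSCI code consumes it.
 */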

static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
	switch (fn) {
	case PSCI_0_2_FN64_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_ON:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		/* Disallow these functions for 32bit guests */
		if (vcpu_mode_is_32bit(vcpu))
			return PSCI_RET_NOT_SUPPORTED;
		break;
	}

	return 0;
}

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	val = kvm_psci_check_allowed_function(vcpu, psci_fn);
	if (val)
		goto out;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0]  = Minor Version = 2
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
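	/*
	 * Concretely (assuming the PSCI_VERSION() encoding used by
	 * include/uapi/linux/psci.h): v0.2 reads back as 0x00000002,
	 * and a v1.1 implementation would read back as 0x00010001.
	 */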
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Either the Trusted OS is MP and hence does not require
		 * migration, or no Trusted OS is present.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally or deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, the guest VCPU
		 * should see an internal failure as the PSCI return
		 * value. To achieve this, we preload r0 (or x0) with the
		 * PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

out:
	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}

static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
{
	u32 psci_fn = smccc_get_function(vcpu);
	u32 feature;
	unsigned long val;
	int ret = 1;

	if (minor > 1)
		return -EINVAL;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		feature = smccc_get_arg1(vcpu);
		val = kvm_psci_check_allowed_function(vcpu, feature);
		if (val)
			break;

		switch (feature) {
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			val = 0;
			break;
		default:
			val = PSCI_RET_NOT_SUPPORTED;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
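
/*
 * Example query flow (illustration only): a 64-bit guest calling
 * PSCI_FEATURES with PSCI_0_2_FN64_CPU_ON as the argument gets 0
 * (supported), while a 32-bit guest making the same query gets
 * PSCI_RET_NOT_SUPPORTED, because kvm_psci_check_allowed_function()
 * rejects the SMC64 function IDs for AArch32 callers.
 */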

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
	case KVM_ARM_PSCI_1_1:
		return kvm_psci_1_x_call(vcpu, 1);
	case KVM_ARM_PSCI_1_0:
		return kvm_psci_1_x_call(vcpu, 0);
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}
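
/*
 * Guest-side sketch (illustration only, not part of this file): on
 * arm64 a guest places the function ID in x0 and the arguments in
 * x1-x3 before trapping to the hypervisor, e.g. for CPU_ON:
 *
 *	ldr	x0, =0xc4000003		// PSCI_0_2_FN64_CPU_ON
 *	mov	x1, #0x1		// target MPIDR (Aff0 == 1)
 *	ldr	x2, =entry_point	// where the new VCPU starts
 *	mov	x3, #0			// context ID, handed to entry in x0
 *	hvc	#0
 */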

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return 3;		/* PSCI version and two workaround registers */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
		return -EFAULT;

	return 0;
}
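
/*
 * Userspace sketch (illustration only): these indices are reported via
 * KVM_GET_REG_LIST, and a VMM reads/writes them with KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG, e.g. to migrate the PSCI version:
 *
 *	u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_PSCI_VERSION,
 *		.addr = (u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */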

#define KVM_REG_FEATURE_LEVEL_WIDTH	4
#define KVM_REG_FEATURE_LEVEL_MASK	(BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As for the hypercall discovery, we pretend we
			 * don't have any FW mitigation if SSBS is present
			 * at all times.
			 */
			if (cpus_have_final_cap(ARM64_SSBS))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
	}

	return -EINVAL;
}
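
/*
 * For reference (assuming the uapi values in asm/kvm.h), the resulting
 * ordering for WORKAROUND_1 is NOT_AVAIL (0) < AVAIL (1) <
 * NOT_REQUIRED (2), so the "get_kernel_wa_level(reg->id) < val" checks
 * below reject any attempt to claim a better mitigation state than the
 * host actually has.
 */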

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu, vcpu->kvm);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
		case KVM_ARM_PSCI_1_1:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		return 0;
	default:
		return -ENOENT;
	}

	return -EINVAL;
}