// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */

#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
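
/*
 * Unshare the previous host task's FPSIMD state from hyp (relevant only
 * in protected mode) and drop the task_struct reference taken when it
 * was shared -- see kvm_arch_vcpu_run_map_fp() for the matching
 * get_task_struct().
 */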
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
	struct task_struct *p = vcpu->arch.parent_task;
	struct user_fpsimd_state *fpsimd;

	if (!is_protected_kvm_enabled() || !p)
		return;

	fpsimd = &p->thread.uw.fpsimd_state;
	kvm_unshare_hyp(fpsimd, fpsimd + 1);
	put_task_struct(p);
}

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	int ret;

	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

	kvm_vcpu_unshare_task_fp(vcpu);

	/* Make sure the host task fpsimd state is visible to hyp: */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
	if (ret)
		return ret;
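
	/*
	 * Tell hyp where to find the host state (by its hyp VA) so that it
	 * can save the host regs there when handling an FPSIMD access trap.
	 */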
	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);

	/*
	 * We need to keep current's task_struct pinned until its data has been
	 * unshared with the hypervisor to make sure it is not re-used by the
	 * kernel and donated to someone else while already shared -- see
	 * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
	 */
	if (is_protected_kvm_enabled()) {
		get_task_struct(current);
		vcpu->arch.parent_task = current;
	}

	return 0;
}

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
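	/*
	 * We must be running in the vcpu thread's task context, and TIF_SVE
	 * must already be clear: it is discarded on syscall entry and cleared
	 * again by kvm_arch_vcpu_put_fp() on every vcpu put.
	 */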
	BUG_ON(!current->mm);
	BUG_ON(test_thread_flag(TIF_SVE));

	vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
	vcpu->arch.flags |= KVM_ARM64_FP_HOST;

	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}
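
/*
 * Mirror current's TIF_FOREIGN_FPSTATE into a vcpu flag just before
 * entering the guest, so that hyp (which cannot safely look at host
 * thread flags) can tell whether the CPU FPSIMD regs still hold
 * current's state.
 */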
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
	else
		vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
}

/*
 * If the guest FPSIMD state was loaded, update the host's context
 * tracking data to mark the CPU FPSIMD regs as dirty and belonging to
 * vcpu, so that they will be written back if the kernel clobbers them
 * due to kernel-mode NEON before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
					 vcpu->arch.sve_state,
					 vcpu->arch.sve_max_vl);
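
		/*
		 * The CPU regs now hold the vcpu state bound just above, so
		 * they are no longer "foreign" to the host. Set TIF_SVE for
		 * an SVE vcpu so that a host-side save writes back the full
		 * SVE regs.
		 */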
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
	}
}

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
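
	/*
	 * Mask IRQs so that kernel-mode NEON kicking in from softirq context
	 * cannot race with the flag and register manipulation below.
	 */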
	local_irq_save(flags);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		if (vcpu_has_sve(vcpu)) {
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

			/* Restore the VL that was saved when bound to the CPU */
			if (!has_vhe())
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}

		fpsimd_save_and_flush_cpu_state();
	} else if (has_vhe() && system_supports_sve()) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}
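
	/*
	 * kvm_arch_vcpu_ctxsync_fp() may have set TIF_SVE on behalf of an
	 * SVE vcpu; clear it so that the host never runs with TIF_SVE set --
	 * see the BUG_ON() in kvm_arch_vcpu_load_fp().
	 */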
	update_thread_flag(TIF_SVE, 0);

	local_irq_restore(flags);
}