// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
2018-06-15 16:47:24 +01:00
# include <linux/irqflags.h>
2018-04-06 14:55:59 +01:00
# include <linux/sched.h>
# include <linux/thread_info.h>
# include <linux/kvm_host.h>
2018-09-28 14:39:11 +01:00
# include <asm/fpsimd.h>
2018-04-06 14:55:59 +01:00
# include <asm/kvm_asm.h>
# include <asm/kvm_host.h>
# include <asm/kvm_mmu.h>
2018-06-15 16:47:25 +01:00
# include <asm/sysreg.h>
2018-04-06 14:55:59 +01:00
/*
* Called on entry to KVM_RUN unless this vcpu previously ran at least
* once and the most recent prior KVM_RUN for this vcpu was called from
* the same task as current ( highly likely ) .
*
* This is guaranteed to execute before kvm_arch_vcpu_load_fp ( vcpu ) ,
* such that on entering hyp the relevant parts of current are already
* mapped .
*/
int kvm_arch_vcpu_run_map_fp ( struct kvm_vcpu * vcpu )
{
int ret ;
struct thread_info * ti = & current - > thread_info ;
struct user_fpsimd_state * fpsimd = & current - > thread . uw . fpsimd_state ;
/*
* Make sure the host task thread flags and fpsimd state are
* visible to hyp :
*/
ret = create_hyp_mappings ( ti , ti + 1 , PAGE_HYP ) ;
if ( ret )
goto error ;
ret = create_hyp_mappings ( fpsimd , fpsimd + 1 , PAGE_HYP ) ;
if ( ret )
goto error ;
vcpu - > arch . host_thread_info = kern_hyp_va ( ti ) ;
vcpu - > arch . host_fpsimd_state = kern_hyp_va ( fpsimd ) ;
error :
return ret ;
}
/*
* Prepare vcpu for saving the host ' s FPSIMD state and loading the guest ' s .
* The actual loading is done by the FPSIMD access trap taken to hyp .
*
* Here , we just set the correct metadata to indicate that the FPSIMD
* state in the cpu regs ( if any ) belongs to current on the host .
*
* TIF_SVE is backed up here , since it may get clobbered with guest state .
* This flag is restored by kvm_arch_vcpu_put_fp ( vcpu ) .
*/
void kvm_arch_vcpu_load_fp ( struct kvm_vcpu * vcpu )
{
BUG_ON ( ! current - > mm ) ;
2018-06-15 16:47:25 +01:00
vcpu - > arch . flags & = ~ ( KVM_ARM64_FP_ENABLED |
KVM_ARM64_HOST_SVE_IN_USE |
KVM_ARM64_HOST_SVE_ENABLED ) ;
2018-04-06 14:55:59 +01:00
vcpu - > arch . flags | = KVM_ARM64_FP_HOST ;
2018-06-15 16:47:25 +01:00
2018-04-06 14:55:59 +01:00
if ( test_thread_flag ( TIF_SVE ) )
vcpu - > arch . flags | = KVM_ARM64_HOST_SVE_IN_USE ;
2018-06-15 16:47:25 +01:00
if ( read_sysreg ( cpacr_el1 ) & CPACR_EL1_ZEN_EL0EN )
vcpu - > arch . flags | = KVM_ARM64_HOST_SVE_ENABLED ;
2018-04-06 14:55:59 +01:00
}
/*
* If the guest FPSIMD state was loaded , update the host ' s context
* tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu
* so that they will be written back if the kernel clobbers them due to
* kernel - mode NEON before re - entry into the guest .
*/
void kvm_arch_vcpu_ctxsync_fp ( struct kvm_vcpu * vcpu )
{
WARN_ON_ONCE ( ! irqs_disabled ( ) ) ;
if ( vcpu - > arch . flags & KVM_ARM64_FP_ENABLED ) {
2018-09-28 14:39:11 +01:00
fpsimd_bind_state_to_cpu ( & vcpu - > arch . ctxt . gp_regs . fp_regs ,
NULL , SVE_VL_MIN ) ;
2018-04-06 14:55:59 +01:00
clear_thread_flag ( TIF_FOREIGN_FPSTATE ) ;
clear_thread_flag ( TIF_SVE ) ;
}
}
/*
* Write back the vcpu FPSIMD regs if they are dirty , and invalidate the
* cpu FPSIMD regs so that they can ' t be spuriously reused if this vcpu
* disappears and another task or vcpu appears that recycles the same
* struct fpsimd_state .
*/
void kvm_arch_vcpu_put_fp ( struct kvm_vcpu * vcpu )
{
2018-06-15 16:47:24 +01:00
unsigned long flags ;
2018-09-28 14:39:16 +01:00
bool host_has_sve = system_supports_sve ( ) ;
bool guest_has_sve = vcpu_has_sve ( vcpu ) ;
2018-06-15 16:47:24 +01:00
local_irq_save ( flags ) ;
2018-04-06 14:55:59 +01:00
if ( vcpu - > arch . flags & KVM_ARM64_FP_ENABLED ) {
2018-09-28 14:39:16 +01:00
u64 * guest_zcr = & vcpu - > arch . ctxt . sys_regs [ ZCR_EL1 ] ;
2018-04-06 14:55:59 +01:00
/* Clean guest FP state to memory and invalidate cpu view */
fpsimd_save ( ) ;
fpsimd_flush_cpu_state ( ) ;
2018-09-28 14:39:16 +01:00
if ( guest_has_sve )
* guest_zcr = read_sysreg_s ( SYS_ZCR_EL12 ) ;
} else if ( host_has_sve ) {
2018-06-15 16:47:25 +01:00
/*
* The FPSIMD / SVE state in the CPU has not been touched , and we
* have SVE ( and VHE ) : CPACR_EL1 ( alias CPTR_EL2 ) has been
* reset to CPACR_EL1_DEFAULT by the Hyp code , disabling SVE
* for EL0 . To avoid spurious traps , restore the trap state
* seen by kvm_arch_vcpu_load_fp ( ) :
*/
if ( vcpu - > arch . flags & KVM_ARM64_HOST_SVE_ENABLED )
sysreg_clear_set ( CPACR_EL1 , 0 , CPACR_EL1_ZEN_EL0EN ) ;
else
sysreg_clear_set ( CPACR_EL1 , CPACR_EL1_ZEN_EL0EN , 0 ) ;
2018-04-06 14:55:59 +01:00
}
2018-06-15 16:47:26 +01:00
update_thread_flag ( TIF_SVE ,
vcpu - > arch . flags & KVM_ARM64_HOST_SVE_IN_USE ) ;
2018-06-15 16:47:24 +01:00
local_irq_restore ( flags ) ;
2018-04-06 14:55:59 +01:00
}