2019-05-27 08:55:21 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2017-01-26 19:50:51 +05:30
/*
* VGIC system registers handling functions for AArch64 mode
*/
# include <linux/irqchip/arm-gic-v3.h>
# include <linux/kvm.h>
# include <linux/kvm_host.h>
# include <asm/kvm_emulate.h>
2020-05-13 11:40:34 +01:00
# include "vgic/vgic.h"
2017-01-26 19:50:51 +05:30
# include "sys_regs.h"
2022-07-04 09:57:38 +01:00
static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u32 pri_bits, id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	/*
	 * Disallow restoring VM state that this hardware cannot
	 * support: each feature field of the incoming value must fit
	 * within what the host implementation provides.
	 */
	pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
	if (pri_bits > vgic_cpu->num_pri_bits)
		return -EINVAL;

	vgic_cpu->num_pri_bits = pri_bits;

	id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
	if (id_bits > vgic_cpu->num_id_bits)
		return -EINVAL;

	vgic_cpu->num_id_bits = id_bits;

	/* SEIS and A3V must match the host's ICH_VTR_EL2 exactly. */
	host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
	seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
	if (host_seis != seis)
		return -EINVAL;

	host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
	a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
	if (host_a3v != a3v)
		return -EINVAL;

	/*
	 * Set VMCR.CTLR using the ICC_CTLR_EL1 layout;
	 * vgic_set_vmcr() converts it to the ICH_VMCR layout.
	 */
	vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
	vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}
static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *valp)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 host_vtr = kvm_vgic_global_state.ich_vtr_el2;
	struct vgic_vmcr vmcr;
	u64 reg = 0;

	vgic_get_vmcr(vcpu, &vmcr);

	/* Feature fields reflect the host implementation. */
	reg |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_cpu->num_pri_bits - 1);
	reg |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_cpu->num_id_bits);
	reg |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
			  FIELD_GET(ICH_VTR_SEIS_MASK, host_vtr));
	reg |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
			  FIELD_GET(ICH_VTR_A3V_MASK, host_vtr));

	/*
	 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout, so its bits
	 * can be packed directly with the ICC_CTLR_EL1 definitions.
	 */
	reg |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
	reg |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);

	*valp = reg;

	return 0;
}
2022-07-04 09:57:38 +01:00
/* Write the priority mask field of the virtual CPU interface. */
static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	struct vgic_vmcr vmcr;

	/* Read-modify-write: only PMR comes from userspace. */
	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}
2022-07-04 09:57:38 +01:00
/* Read back the priority mask field of the virtual CPU interface. */
static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	*val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);

	return 0;
}
2022-07-04 09:57:38 +01:00
/* Write the Group 0 binary point register. */
static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	/* Read-modify-write: only the BPR0 field comes from userspace. */
	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}
/* Read back the Group 0 binary point register. */
static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	*val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);

	return 0;
}
2022-07-04 09:57:38 +01:00
/*
 * Write the Group 1 binary point register. When VMCR.CBPR is set the
 * write is silently discarded (but still reports success).
 */
static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	if (vmcr.cbpr)
		return 0;

	vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}
2022-07-04 09:57:38 +01:00
/*
 * Read back the Group 1 binary point register. With VMCR.CBPR set,
 * BPR1 tracks BPR0 plus one, saturated at 7; otherwise it has its own
 * backing field (abpr).
 */
static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	if (vmcr.cbpr)
		*val = min((vmcr.bpr + 1), 7U);
	else
		*val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);

	return 0;
}
/* Write the Group 0 interrupt enable bit. */
static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	/* Read-modify-write: only the enable bit comes from userspace. */
	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}
/* Read back the Group 0 interrupt enable bit. */
static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	*val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);

	return 0;
}
/* Write the Group 1 interrupt enable bit. */
static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	/* Read-modify-write: only the enable bit comes from userspace. */
	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}
2022-07-04 09:57:38 +01:00
/* Read back the Group 1 interrupt enable bit. */
static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	/*
	 * Pack the enable bit into the register layout with FIELD_PREP,
	 * matching get_gic_grpen0(). The previous FIELD_GET only
	 * produced the same result because ICC_IGRPEN1_EL1_MASK is
	 * bit 0 (shift by zero either way).
	 */
	*val = FIELD_PREP(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);

	return 0;
}
/*
 * Store an active priority register value: bank selects AP1Rn (group 1)
 * when non-zero, AP0Rn (group 0) otherwise; idx selects the register.
 */
static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		cpu_if->vgic_ap1r[idx] = val;
	else
		cpu_if->vgic_ap0r[idx] = val;
}
/* Counterpart of set_apr_reg(): fetch AP1Rn[idx] or AP0Rn[idx]. */
static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	return apr ? cpu_if->vgic_ap1r[idx] : cpu_if->vgic_ap0r[idx];
}
/* Userspace write to one of the ICC_AP0Rn_EL1 registers. */
static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	/* Op2 low bits encode n for AP0Rn. */
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 0, idx);

	return 0;
}
2022-07-04 09:57:38 +01:00
/* Userspace read of one of the ICC_AP0Rn_EL1 registers. */
static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	/* Op2 low bits encode n for AP0Rn. */
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 0, idx);

	return 0;
}
2022-07-04 09:57:38 +01:00
/* Userspace write to one of the ICC_AP1Rn_EL1 registers. */
static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	/* Op2 low bits encode n for AP1Rn. */
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 1, idx);

	return 0;
}
/* Userspace read of one of the ICC_AP1Rn_EL1 registers. */
static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	/* Op2 low bits encode n for AP1Rn. */
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 1, idx);

	return 0;
}
2022-07-04 09:57:38 +01:00
/*
 * Userspace write to ICC_SRE_EL1. Only values with the SRE bit set are
 * accepted; nothing is stored here — the write just validates the bit.
 */
static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	return (val & ICC_SRE_EL1_SRE) ? 0 : -EINVAL;
}
2022-07-04 09:57:38 +01:00
/* Userspace read of ICC_SRE_EL1: report the cached vgic_sre value. */
static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	*val = cpu_if->vgic_sre;

	return 0;
}
2022-07-04 09:57:38 +01:00
2017-01-26 19:50:51 +05:30
/*
 * ICC_* system registers of the guest's GICv3 CPU interface that
 * userspace can save/restore, each with its set_user/get_user accessor.
 *
 * NOTE(review): lookups go through get_reg_by_id() — keep the entries
 * ordered by their system register encoding.
 */
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1),
	  .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
	{ SYS_DESC(SYS_ICC_BPR0_EL1),
	  .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_BPR1_EL1),
	  .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
	{ SYS_DESC(SYS_ICC_CTLR_EL1),
	  .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
	{ SYS_DESC(SYS_ICC_SRE_EL1),
	  .set_user = set_gic_sre, .get_user = get_gic_sre, },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1),
	  .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
	  .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
};
2022-07-03 14:57:29 +01:00
/*
 * Translate a KVM device attribute encoding into the ARM64_SYS_REG id
 * used to look up a descriptor in gic_v3_icc_reg_descs.
 */
static u64 attr_to_id(u64 attr)
{
	u64 op0 = FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr);
	u64 op1 = FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr);
	u64 crn = FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr);
	u64 crm = FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr);
	u64 op2 = FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr);

	return ARM64_SYS_REG(op0, op1, crn, crm, op2);
}
2017-01-26 19:50:51 +05:30
2022-07-03 14:57:29 +01:00
int vgic_v3_has_cpu_sysregs_attr ( struct kvm_vcpu * vcpu , struct kvm_device_attr * attr )
{
if ( get_reg_by_id ( attr_to_id ( attr - > attr ) , gic_v3_icc_reg_descs ,
2022-07-03 14:08:46 +01:00
ARRAY_SIZE ( gic_v3_icc_reg_descs ) ) )
2017-01-26 19:50:51 +05:30
return 0 ;
return - ENXIO ;
}
2022-07-04 08:07:44 +01:00
/*
 * Forward a device-attribute access to the generic sys_reg userspace
 * accessors, using the descriptor table for this CPU interface.
 */
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr,
				bool is_write)
{
	struct kvm_one_reg reg = {
		.id = attr_to_id(attr->attr),
		.addr = attr->addr,
	};

	if (is_write)
		return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));

	return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
				    ARRAY_SIZE(gic_v3_icc_reg_descs));
}