// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv2 MMIO handling functions
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vgic->enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
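		/*
		 * GICD_TYPER: ITLinesNumber (bits [4:0]) encodes the number
		 * of implemented interrupts as 32 * (N + 1), and CPUNumber
		 * (bits [7:5]) is the number of CPU interfaces minus one.
		 */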
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
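		/*
		 * On a transition from disabled to enabled, kick all vcpus
		 * so that interrupts that became pending while the
		 * distributor was off get delivered.
		 */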
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	switch (addr & 0x0c) {
	case GIC_DIST_IIDR:
		if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
			return -EINVAL;

		/*
		 * If we observe a write to GICD_IIDR we know that userspace
		 * has been updated and has had a chance to cope with older
		 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
		 * interrupts as group 1, and therefore we now allow groups to
		 * be user writable.  Doing this by default would break
		 * migration from old kernels to new kernels with legacy
		 * userspace.
		 */
		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
		return 0;
	}

	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
	return 0;
}

static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
		vgic_mmio_write_group(vcpu, addr, len, val);

	return 0;
}

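/*
 * A write to GICD_SGIR raises software-generated interrupts:
 * bits [3:0] hold the SGI number, bits [23:16] the CPU target list
 * and bits [25:24] the target list filter (0: use the target list,
 * 1: all but self, 2: self only, 3: reserved).
 */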
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;
	unsigned long flags;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

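/*
 * GICD_ITARGETSR holds one byte per interrupt; each bit set in that
 * byte names one CPU interface the interrupt is routed to.
 */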
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
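		/*
		 * Despite the byte being a full CPU mask, this emulation
		 * routes each SPI to a single vcpu: the lowest-numbered
		 * one in the mask, or vcpu 0 if no bit is set.
		 */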
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

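/*
 * GICD_{C,S}PENDSGIR hold one byte per SGI, with one pending bit per
 * possible source CPU interface.
 */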
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
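			/*
			 * A new source bit makes the SGI pending; queueing
			 * it also drops the irq_lock.
			 */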
			irq->pending_latch = true;

			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only; there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

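/*
 * GIC_CPU_ACTIVEPRIO covers the four GICC_APRn registers.  On GICv2
 * hardware only a single APR is backed; when a GICv2 guest runs on
 * GICv3 hardware, its active priorities live in ICH_AP1Rn instead,
 * with the valid index range depending on the implemented priority
 * bits.
 */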
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;

		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;

		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}

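/*
 * Each entry below maps a distributor register offset to its guest
 * MMIO handlers, optional userspace accessors, its length (or number
 * of bits per interrupt) and the access widths it accepts.
 */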
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
		NULL, vgic_mmio_uaccess_write_v2_misc,
		12, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_group, vgic_mmio_write_group,
		NULL, vgic_mmio_uaccess_write_v2_group, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		NULL, vgic_uaccess_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		NULL, vgic_uaccess_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}

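/*
 * Check whether a KVM_DEV_ARM_VGIC_GRP_{DIST,CPU}_REGS attribute
 * refers to an implemented, 32-bit aligned register.
 */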
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

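/*
 * Userspace register accesses are routed through a temporary
 * vgic_io_device so that they share the table-driven MMIO handlers
 * above.
 */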
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}