2020-10-05 20:36:51 +03:00
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2019 NXP
 */
# include <linux/vfio.h>
# include <linux/slab.h>
# include <linux/types.h>
# include <linux/eventfd.h>
# include "linux/fsl/mc.h"
# include "vfio_fsl_mc_private.h"
2020-10-26 18:53:36 +02:00
static int vfio_fsl_mc_irqs_allocate ( struct vfio_fsl_mc_device * vdev )
2020-10-05 20:36:52 +03:00
{
struct fsl_mc_device * mc_dev = vdev - > mc_dev ;
struct vfio_fsl_mc_irq * mc_irq ;
int irq_count ;
int ret , i ;
/* Device does not support any interrupt */
if ( mc_dev - > obj_desc . irq_count = = 0 )
return 0 ;
/* interrupts were already allocated for this device */
if ( vdev - > mc_irqs )
return 0 ;
irq_count = mc_dev - > obj_desc . irq_count ;
2023-01-08 17:44:26 +02:00
mc_irq = kcalloc ( irq_count , sizeof ( * mc_irq ) , GFP_KERNEL_ACCOUNT ) ;
2020-10-05 20:36:52 +03:00
if ( ! mc_irq )
return - ENOMEM ;
/* Allocate IRQs */
ret = fsl_mc_allocate_irqs ( mc_dev ) ;
if ( ret ) {
kfree ( mc_irq ) ;
return ret ;
}
for ( i = 0 ; i < irq_count ; i + + ) {
mc_irq [ i ] . count = 1 ;
mc_irq [ i ] . flags = VFIO_IRQ_INFO_EVENTFD ;
}
vdev - > mc_irqs = mc_irq ;
return 0 ;
}
static irqreturn_t vfio_fsl_mc_irq_handler ( int irq_num , void * arg )
{
struct vfio_fsl_mc_irq * mc_irq = ( struct vfio_fsl_mc_irq * ) arg ;
2023-11-22 13:48:23 +01:00
eventfd_signal ( mc_irq - > trigger ) ;
2020-10-05 20:36:52 +03:00
return IRQ_HANDLED ;
}
static int vfio_set_trigger ( struct vfio_fsl_mc_device * vdev ,
int index , int fd )
{
struct vfio_fsl_mc_irq * irq = & vdev - > mc_irqs [ index ] ;
struct eventfd_ctx * trigger ;
int hwirq ;
int ret ;
2021-12-10 23:19:34 +01:00
hwirq = vdev - > mc_dev - > irqs [ index ] - > virq ;
2020-10-05 20:36:52 +03:00
if ( irq - > trigger ) {
free_irq ( hwirq , irq ) ;
kfree ( irq - > name ) ;
eventfd_ctx_put ( irq - > trigger ) ;
irq - > trigger = NULL ;
}
if ( fd < 0 ) /* Disable only */
return 0 ;
2023-01-08 17:44:26 +02:00
irq - > name = kasprintf ( GFP_KERNEL_ACCOUNT , " vfio-irq[%d](%s) " ,
2020-10-05 20:36:52 +03:00
hwirq , dev_name ( & vdev - > mc_dev - > dev ) ) ;
if ( ! irq - > name )
return - ENOMEM ;
trigger = eventfd_ctx_fdget ( fd ) ;
if ( IS_ERR ( trigger ) ) {
kfree ( irq - > name ) ;
return PTR_ERR ( trigger ) ;
}
irq - > trigger = trigger ;
ret = request_irq ( hwirq , vfio_fsl_mc_irq_handler , 0 ,
irq - > name , irq ) ;
if ( ret ) {
kfree ( irq - > name ) ;
eventfd_ctx_put ( trigger ) ;
irq - > trigger = NULL ;
return ret ;
}
return 0 ;
}
2020-10-05 20:36:51 +03:00
static int vfio_fsl_mc_set_irq_trigger ( struct vfio_fsl_mc_device * vdev ,
unsigned int index , unsigned int start ,
unsigned int count , u32 flags ,
void * data )
{
2020-10-05 20:36:52 +03:00
struct fsl_mc_device * mc_dev = vdev - > mc_dev ;
int ret , hwirq ;
struct vfio_fsl_mc_irq * irq ;
struct device * cont_dev = fsl_mc_cont_dev ( & mc_dev - > dev ) ;
struct fsl_mc_device * mc_cont = to_fsl_mc_device ( cont_dev ) ;
2020-10-15 21:14:17 +03:00
if ( ! count & & ( flags & VFIO_IRQ_SET_DATA_NONE ) )
return vfio_set_trigger ( vdev , index , - 1 ) ;
2020-10-05 20:36:52 +03:00
if ( start ! = 0 | | count ! = 1 )
return - EINVAL ;
2021-08-05 22:19:02 -03:00
mutex_lock ( & vdev - > vdev . dev_set - > lock ) ;
2020-10-05 20:36:52 +03:00
ret = fsl_mc_populate_irq_pool ( mc_cont ,
FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS ) ;
if ( ret )
goto unlock ;
ret = vfio_fsl_mc_irqs_allocate ( vdev ) ;
if ( ret )
goto unlock ;
2021-08-05 22:19:02 -03:00
mutex_unlock ( & vdev - > vdev . dev_set - > lock ) ;
2020-10-05 20:36:52 +03:00
if ( flags & VFIO_IRQ_SET_DATA_EVENTFD ) {
s32 fd = * ( s32 * ) data ;
return vfio_set_trigger ( vdev , index , fd ) ;
}
2021-12-10 23:19:34 +01:00
hwirq = vdev - > mc_dev - > irqs [ index ] - > virq ;
2020-10-05 20:36:52 +03:00
irq = & vdev - > mc_irqs [ index ] ;
if ( flags & VFIO_IRQ_SET_DATA_NONE ) {
vfio_fsl_mc_irq_handler ( hwirq , irq ) ;
} else if ( flags & VFIO_IRQ_SET_DATA_BOOL ) {
u8 trigger = * ( u8 * ) data ;
if ( trigger )
vfio_fsl_mc_irq_handler ( hwirq , irq ) ;
}
return 0 ;
unlock :
2021-08-05 22:19:02 -03:00
mutex_unlock ( & vdev - > vdev . dev_set - > lock ) ;
2020-10-05 20:36:52 +03:00
return ret ;
2020-10-05 20:36:51 +03:00
}
int vfio_fsl_mc_set_irqs_ioctl ( struct vfio_fsl_mc_device * vdev ,
u32 flags , unsigned int index ,
unsigned int start , unsigned int count ,
void * data )
{
if ( flags & VFIO_IRQ_SET_ACTION_TRIGGER )
return vfio_fsl_mc_set_irq_trigger ( vdev , index , start ,
count , flags , data ) ;
else
return - EINVAL ;
}
2020-10-05 20:36:52 +03:00
/* Free All IRQs for the given MC object */
void vfio_fsl_mc_irqs_cleanup ( struct vfio_fsl_mc_device * vdev )
{
struct fsl_mc_device * mc_dev = vdev - > mc_dev ;
int irq_count = mc_dev - > obj_desc . irq_count ;
int i ;
/*
* Device does not support any interrupt or the interrupts
* were not configured
*/
if ( ! vdev - > mc_irqs )
return ;
for ( i = 0 ; i < irq_count ; i + + )
vfio_set_trigger ( vdev , i , - 1 ) ;
fsl_mc_free_irqs ( mc_dev ) ;
kfree ( vdev - > mc_irqs ) ;
vdev - > mc_irqs = NULL ;
}