/*
 * VFIO generic eventfd code for IRQFD support.
 * Derived from drivers/vfio/pci/vfio_pci_intrs.c
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/vfio.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "IRQFD support for VFIO bus drivers"

static struct workqueue_struct *vfio_irqfd_cleanup_wq;
static DEFINE_SPINLOCK(virqfd_lock);
static int __init vfio_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void __exit vfio_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}

static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}
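
/*
 * Wait queue callback, invoked when the eventfd is signaled or closed.
 * This runs in atomic context, under the eventfd's wait queue lock, so
 * the handler callback must not sleep; anything that can sleep belongs
 * in the thread callback, which runs from the inject work item.
 */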
static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->opaque, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & EPOLLHUP) {
		unsigned long flags;
		spin_lock_irqsave(&virqfd_lock, flags);

		/*
		 * The eventfd is closing, if the virqfd has not yet been
		 * queued for release, as determined by testing whether the
		 * virqfd pointer to it is still valid, queue it now.  As
		 * with kvm irqfds, we know we won't race against the virqfd
		 * going away because we hold the lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd_lock, flags);
	}

	return 0;
}
static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}
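
/*
 * Runs from the cleanup workqueue.  Ordering matters here: first detach
 * from the eventfd's wait queue so no new wakeups (and thus no new
 * injects) can arrive, then flush any inject work already scheduled,
 * and only then drop the eventfd reference and free the virqfd.
 */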
static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
}

static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->opaque, virqfd->data);
}
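
/*
 * vfio_virqfd_enable - link an eventfd to a pair of caller callbacks
 * @opaque:  caller context passed back to @handler and @thread
 * @handler: optional callback, run in atomic (wait queue wakeup) context;
 *           a NULL handler or a nonzero return hands off to @thread
 * @thread:  optional callback, run from a work item in process context
 * @data:    second argument passed to @handler and @thread
 * @pvirqfd: caller's slot for the resulting virqfd; a non-NULL slot means
 *           already enabled (-EBUSY), and the slot is later used to
 *           disable the virqfd or to detect the eventfd closing
 * @fd:      eventfd file descriptor to watch
 *
 * Returns 0 on success or a negative errno.
 */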
int vfio_virqfd_enable(void *opaque,
		       int (*handler)(void *, void *),
		       void (*thread)(void *, void *),
		       void *data, struct virqfd **pvirqfd, int fd)
{
	struct fd irqfd;
	struct eventfd_ctx *ctx;
	struct virqfd *virqfd;
	int ret = 0;
	__poll_t events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->opaque = opaque;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	irqfd = fdget(fd);
	if (!irqfd.file) {
		ret = -EBADF;
		goto err_fd;
	}

	ctx = eventfd_ctx_fileget(irqfd.file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto err_ctx;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&virqfd_lock);

	if (*pvirqfd) {
		spin_unlock_irq(&virqfd_lock);
		ret = -EBUSY;
		goto err_busy;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&virqfd_lock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & EPOLLIN) {
		if ((!handler || handler(opaque, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the EPOLLHUP.
	 */
	fdput(irqfd);

	return 0;
err_busy:
	eventfd_ctx_put(ctx);
err_ctx:
	fdput(irqfd);
err_fd:
	kfree(virqfd);

	return ret;
}
EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
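
/*
 * vfio_virqfd_disable - tear down a virqfd set up by vfio_virqfd_enable
 * @pvirqfd: the caller's virqfd slot; cleared under the lock so the
 *           eventfd close path cannot queue a second release
 *
 * Blocks until the cleanup workqueue drains, so the handler and thread
 * callbacks are guaranteed not to run after this returns.
 */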
void vfio_virqfd_disable(struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&virqfd_lock, flags);

	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}

	spin_unlock_irqrestore(&virqfd_lock, flags);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
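
/*
 * Example usage (an illustrative sketch, not part of this file): a VFIO
 * bus driver wiring an "unmask" eventfd to a virqfd.  All names below
 * (my_vdev, my_unmask_handler, my_unmask_thread, my_try_unmask_fast,
 * my_unmask_slow) are hypothetical.
 *
 *	struct my_vdev {
 *		struct virqfd *unmask_virqfd;
 *		...
 *	};
 *
 *	// Atomic context: return nonzero to defer to the thread callback.
 *	static int my_unmask_handler(void *opaque, void *data)
 *	{
 *		struct my_vdev *vdev = opaque;
 *		return my_try_unmask_fast(vdev);	// hypothetical
 *	}
 *
 *	// Process context: may sleep.
 *	static void my_unmask_thread(void *opaque, void *data)
 *	{
 *		struct my_vdev *vdev = opaque;
 *		my_unmask_slow(vdev);			// hypothetical
 *	}
 *
 *	// Enable, where fd is an eventfd supplied by userspace:
 *	ret = vfio_virqfd_enable(vdev, my_unmask_handler, my_unmask_thread,
 *				 NULL, &vdev->unmask_virqfd, fd);
 *
 *	// Disable on teardown; safe even if never enabled, assuming the
 *	// slot started out NULL, since it is NULL-checked under the lock:
 *	vfio_virqfd_disable(&vdev->unmask_virqfd);
 */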
module_init(vfio_virqfd_init);
module_exit(vfio_virqfd_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);