// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */
# include <linux/module.h>
# include <linux/init.h>
# include <linux/slab.h>
2017-03-17 04:17:35 +01:00
# include <linux/mdev.h>
2017-03-17 04:17:31 +01:00
# include <asm/isc.h>
2020-05-05 14:27:39 +02:00
# include "chp.h"
2017-03-17 04:17:35 +01:00
# include "ioasm.h"
# include "css.h"
2017-03-17 04:17:31 +01:00
# include "vfio_ccw_private.h"
2017-03-17 04:17:39 +01:00
/* Single-threaded workqueue serving the I/O and CRW bottom halves. */
struct workqueue_struct *vfio_ccw_work_q;

/* Slab caches backing the user-copyable mediated-device regions. */
struct kmem_cache *vfio_ccw_io_region;
struct kmem_cache *vfio_ccw_cmd_region;
struct kmem_cache *vfio_ccw_schib_region;
struct kmem_cache *vfio_ccw_crw_region;

/* s390 debug-feature areas: formatted messages and hex/ascii traces. */
debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;
2017-03-17 04:17:31 +01:00
/*
* Helpers
*/
2017-03-17 04:17:33 +01:00
int vfio_ccw_sch_quiesce ( struct subchannel * sch )
2017-03-17 04:17:31 +01:00
{
2022-11-04 15:20:01 +01:00
struct vfio_ccw_parent * parent = dev_get_drvdata ( & sch - > dev ) ;
struct vfio_ccw_private * private = dev_get_drvdata ( & parent - > dev ) ;
2017-03-17 04:17:31 +01:00
DECLARE_COMPLETION_ONSTACK ( completion ) ;
int iretry , ret = 0 ;
2022-11-04 15:20:01 +01:00
/*
* Probably an impossible situation , after being called through
* FSM callbacks . But in the event it did , register a warning
* and return as if things were fine .
*/
if ( WARN_ON ( ! private ) )
return 0 ;
2019-04-16 17:23:14 -04:00
iretry = 255 ;
2017-03-17 04:17:31 +01:00
do {
ret = cio_cancel_halt_clear ( sch , & iretry ) ;
2019-04-16 17:23:14 -04:00
if ( ret = = - EIO ) {
pr_err ( " vfio_ccw: could not quiesce subchannel 0.%x.%04x! \n " ,
sch - > schid . ssid , sch - > schid . sch_no ) ;
break ;
}
/*
* Flush all I / O and wait for
* cancel / halt / clear completion .
*/
private - > completion = & completion ;
2023-11-01 12:57:51 +01:00
spin_unlock_irq ( & sch - > lock ) ;
2017-03-17 04:17:31 +01:00
2019-04-16 17:23:14 -04:00
if ( ret = = - EBUSY )
wait_for_completion_timeout ( & completion , 3 * HZ ) ;
2017-03-17 04:17:31 +01:00
2019-04-16 17:23:14 -04:00
private - > completion = NULL ;
flush_workqueue ( vfio_ccw_work_q ) ;
2023-11-01 12:57:51 +01:00
spin_lock_irq ( & sch - > lock ) ;
2017-03-17 04:17:31 +01:00
ret = cio_disable_subchannel ( sch ) ;
} while ( ret = = - EBUSY ) ;
2022-07-07 15:57:35 +02:00
2017-03-17 04:17:31 +01:00
return ret ;
}
2022-11-04 15:20:03 +01:00
void vfio_ccw_sch_io_todo ( struct work_struct * work )
2017-03-17 04:17:39 +01:00
{
struct vfio_ccw_private * private ;
struct irb * irb ;
2019-03-11 10:59:53 +01:00
bool is_final ;
2021-05-11 21:56:31 +02:00
bool cp_is_finished = false ;
2017-03-17 04:17:35 +01:00
2017-03-17 04:17:39 +01:00
private = container_of ( work , struct vfio_ccw_private , io_work ) ;
irb = & private - > irb ;
2017-03-17 04:17:35 +01:00
2019-03-11 10:59:53 +01:00
is_final = ! ( scsw_actl ( & irb - > scsw ) &
( SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT ) ) ;
2017-03-17 04:17:39 +01:00
if ( scsw_is_solicited ( & irb - > scsw ) ) {
cp_update_scsw ( & private - > cp , & irb - > scsw ) ;
2021-05-11 21:56:31 +02:00
if ( is_final & & private - > state = = VFIO_CCW_STATE_CP_PENDING ) {
2019-03-11 10:59:53 +01:00
cp_free ( & private - > cp ) ;
2021-05-11 21:56:31 +02:00
cp_is_finished = true ;
}
2017-03-17 04:17:39 +01:00
}
2019-01-08 15:53:03 +01:00
mutex_lock ( & private - > io_mutex ) ;
2018-09-21 22:40:12 +02:00
memcpy ( private - > io_region - > irb_area , irb , sizeof ( * irb ) ) ;
2019-01-08 15:53:03 +01:00
mutex_unlock ( & private - > io_mutex ) ;
2017-03-17 04:17:39 +01:00
2021-05-11 21:56:31 +02:00
/*
* Reset to IDLE only if processing of a channel program
* has finished . Do not overwrite a possible processing
2022-07-07 15:57:28 +02:00
* state if the interrupt was unsolicited , or if the final
* interrupt was for HSCH or CSCH .
2021-05-11 21:56:31 +02:00
*/
2022-07-07 15:57:28 +02:00
if ( cp_is_finished )
2017-03-17 04:17:40 +01:00
private - > state = VFIO_CCW_STATE_IDLE ;
2019-05-15 01:42:43 +02:00
if ( private - > io_trigger )
2023-11-22 13:48:23 +01:00
eventfd_signal ( private - > io_trigger ) ;
2017-03-17 04:17:35 +01:00
}
2022-11-04 15:20:03 +01:00
void vfio_ccw_crw_todo ( struct work_struct * work )
2020-05-05 14:27:44 +02:00
{
struct vfio_ccw_private * private ;
private = container_of ( work , struct vfio_ccw_private , crw_work ) ;
if ( ! list_empty ( & private - > crw ) & & private - > crw_trigger )
2023-11-22 13:48:23 +01:00
eventfd_signal ( private - > crw_trigger ) ;
2020-05-05 14:27:44 +02:00
}
2017-03-17 04:17:31 +01:00
/*
* Css driver callbacks
*/
static void vfio_ccw_sch_irq ( struct subchannel * sch )
{
2022-11-04 15:20:01 +01:00
struct vfio_ccw_parent * parent = dev_get_drvdata ( & sch - > dev ) ;
struct vfio_ccw_private * private = dev_get_drvdata ( & parent - > dev ) ;
/*
* The subchannel should still be disabled at this point ,
* so an interrupt would be quite surprising . As with an
* interrupt while the FSM is closed , let ' s attempt to
* disable the subchannel again .
*/
if ( ! private ) {
VFIO_CCW_MSG_EVENT ( 2 , " sch %x.%x.%04x: unexpected interrupt \n " ,
sch - > schid . cssid , sch - > schid . ssid ,
sch - > schid . sch_no ) ;
cio_disable_subchannel ( sch ) ;
return ;
}
2017-03-17 04:17:31 +01:00
inc_irq_stat ( IRQIO_CIO ) ;
2017-03-17 04:17:40 +01:00
vfio_ccw_fsm_event ( private , VFIO_CCW_EVENT_INTERRUPT ) ;
2017-03-17 04:17:31 +01:00
}
2022-11-04 15:20:01 +01:00
/* Device release callback: frees the embedding vfio_ccw_parent object. */
static void vfio_ccw_free_parent(struct device *dev)
{
	struct vfio_ccw_parent *parent =
		container_of(dev, struct vfio_ccw_parent, dev);

	kfree(parent);
}
2021-10-26 14:57:31 -03:00
static int vfio_ccw_sch_probe ( struct subchannel * sch )
{
struct pmcw * pmcw = & sch - > schib . pmcw ;
2022-11-04 15:20:01 +01:00
struct vfio_ccw_parent * parent ;
2021-10-26 14:57:31 -03:00
int ret = - ENOMEM ;
if ( pmcw - > qf ) {
dev_warn ( & sch - > dev , " vfio: ccw: does not support QDIO: %s \n " ,
dev_name ( & sch - > dev ) ) ;
return - ENODEV ;
}
2023-05-22 19:35:59 -06:00
parent = kzalloc ( struct_size ( parent , mdev_types , 1 ) , GFP_KERNEL ) ;
2022-11-04 15:20:01 +01:00
if ( ! parent )
return - ENOMEM ;
dev_set_name ( & parent - > dev , " parent " ) ;
parent - > dev . parent = & sch - > dev ;
parent - > dev . release = & vfio_ccw_free_parent ;
ret = device_register ( & parent - > dev ) ;
if ( ret )
goto out_free ;
dev_set_drvdata ( & sch - > dev , parent ) ;
2017-03-17 04:17:31 +01:00
2022-11-04 15:20:01 +01:00
parent - > mdev_type . sysfs_name = " io " ;
parent - > mdev_type . pretty_name = " I/O subchannel (Non-QDIO) " ;
parent - > mdev_types [ 0 ] = & parent - > mdev_type ;
ret = mdev_register_parent ( & parent - > parent , & sch - > dev ,
2022-09-23 11:26:43 +02:00
& vfio_ccw_mdev_driver ,
2022-11-04 15:20:01 +01:00
parent - > mdev_types , 1 ) ;
2018-10-25 19:15:20 +02:00
if ( ret )
2022-11-04 15:20:01 +01:00
goto out_unreg ;
2018-10-25 19:15:20 +02:00
2019-08-15 13:53:41 +02:00
VFIO_CCW_MSG_EVENT ( 4 , " bound to subchannel %x.%x.%04x \n " ,
sch - > schid . cssid , sch - > schid . ssid ,
sch - > schid . sch_no ) ;
2017-03-17 04:17:31 +01:00
return 0 ;
2022-11-04 15:20:01 +01:00
out_unreg :
2022-12-02 09:46:15 -07:00
device_del ( & parent - > dev ) ;
2017-03-17 04:17:31 +01:00
out_free :
2022-12-02 09:46:15 -07:00
put_device ( & parent - > dev ) ;
2017-03-17 04:17:31 +01:00
dev_set_drvdata ( & sch - > dev , NULL ) ;
return ret ;
}
2021-07-13 21:35:19 +02:00
static void vfio_ccw_sch_remove ( struct subchannel * sch )
2017-03-17 04:17:31 +01:00
{
2022-11-04 15:20:01 +01:00
struct vfio_ccw_parent * parent = dev_get_drvdata ( & sch - > dev ) ;
2017-03-17 04:17:31 +01:00
2022-11-04 15:20:01 +01:00
mdev_unregister_parent ( & parent - > parent ) ;
2017-03-17 04:17:33 +01:00
2022-11-04 15:20:01 +01:00
device_unregister ( & parent - > dev ) ;
2017-03-17 04:17:31 +01:00
dev_set_drvdata ( & sch - > dev , NULL ) ;
2019-08-15 13:53:41 +02:00
VFIO_CCW_MSG_EVENT ( 4 , " unbound from subchannel %x.%x.%04x \n " ,
sch - > schid . cssid , sch - > schid . ssid ,
sch - > schid . sch_no ) ;
2017-03-17 04:17:31 +01:00
}
static void vfio_ccw_sch_shutdown ( struct subchannel * sch )
{
2022-11-04 15:20:01 +01:00
struct vfio_ccw_parent * parent = dev_get_drvdata ( & sch - > dev ) ;
struct vfio_ccw_private * private = dev_get_drvdata ( & parent - > dev ) ;
2023-02-10 18:42:27 +01:00
if ( ! private )
2022-11-04 15:20:01 +01:00
return ;
2022-07-07 15:57:35 +02:00
vfio_ccw_fsm_event ( private , VFIO_CCW_EVENT_CLOSE ) ;
2022-07-07 15:57:37 +02:00
vfio_ccw_fsm_event ( private , VFIO_CCW_EVENT_NOT_OPER ) ;
2017-03-17 04:17:31 +01:00
}
/**
* vfio_ccw_sch_event - process subchannel event
* @ sch : subchannel
* @ process : non - zero if function is called in process context
*
* An unspecified event occurred for this subchannel . Adjust data according
* to the current operational state of the subchannel . Return zero when the
* event has been handled sufficiently or - EAGAIN when this function should
* be called again in process context .
*/
static int vfio_ccw_sch_event ( struct subchannel * sch , int process )
{
2022-11-04 15:20:01 +01:00
struct vfio_ccw_parent * parent = dev_get_drvdata ( & sch - > dev ) ;
struct vfio_ccw_private * private = dev_get_drvdata ( & parent - > dev ) ;
2017-03-17 04:17:31 +01:00
unsigned long flags ;
2018-05-02 09:25:59 +02:00
int rc = - EAGAIN ;
2017-03-17 04:17:31 +01:00
2023-11-01 12:57:51 +01:00
spin_lock_irqsave ( & sch - > lock , flags ) ;
2017-03-17 04:17:31 +01:00
if ( ! device_is_registered ( & sch - > dev ) )
goto out_unlock ;
if ( work_pending ( & sch - > todo_work ) )
goto out_unlock ;
2018-05-02 09:25:59 +02:00
rc = 0 ;
2017-03-17 04:17:40 +01:00
2022-11-04 15:20:01 +01:00
if ( cio_update_schib ( sch ) ) {
if ( private )
vfio_ccw_fsm_event ( private , VFIO_CCW_EVENT_NOT_OPER ) ;
}
2022-07-07 15:57:29 +02:00
2017-03-17 04:17:31 +01:00
out_unlock :
2023-11-01 12:57:51 +01:00
spin_unlock_irqrestore ( & sch - > lock , flags ) ;
2017-03-17 04:17:31 +01:00
2018-05-02 09:25:59 +02:00
return rc ;
2017-03-17 04:17:31 +01:00
}
2020-05-05 14:27:44 +02:00
static void vfio_ccw_queue_crw ( struct vfio_ccw_private * private ,
unsigned int rsc ,
unsigned int erc ,
unsigned int rsid )
{
struct vfio_ccw_crw * crw ;
/*
* If unable to allocate a CRW , just drop the event and
* carry on . The guest will either see a later one or
* learn when it issues its own store subchannel .
*/
crw = kzalloc ( sizeof ( * crw ) , GFP_ATOMIC ) ;
if ( ! crw )
return ;
/*
* Build the CRW based on the inputs given to us .
*/
crw - > crw . rsc = rsc ;
crw - > crw . erc = erc ;
crw - > crw . rsid = rsid ;
list_add_tail ( & crw - > next , & private - > crw ) ;
queue_work ( vfio_ccw_work_q , & private - > crw_work ) ;
}
2020-05-05 14:27:39 +02:00
static int vfio_ccw_chp_event ( struct subchannel * sch ,
struct chp_link * link , int event )
{
2022-11-04 15:20:01 +01:00
struct vfio_ccw_parent * parent = dev_get_drvdata ( & sch - > dev ) ;
struct vfio_ccw_private * private = dev_get_drvdata ( & parent - > dev ) ;
2020-05-05 14:27:39 +02:00
int mask = chp_ssd_get_mask ( & sch - > ssd_info , link ) ;
int retry = 255 ;
if ( ! private | | ! mask )
return 0 ;
2022-11-04 15:20:02 +01:00
trace_vfio_ccw_chp_event ( sch - > schid , mask , event ) ;
2022-07-07 15:57:27 +02:00
VFIO_CCW_MSG_EVENT ( 2 , " sch %x.%x.%04x: mask=0x%x event=%d \n " ,
sch - > schid . cssid ,
2020-05-05 14:27:39 +02:00
sch - > schid . ssid , sch - > schid . sch_no ,
mask , event ) ;
if ( cio_update_schib ( sch ) )
return - ENODEV ;
switch ( event ) {
case CHP_VARY_OFF :
/* Path logically turned off */
sch - > opm & = ~ mask ;
sch - > lpm & = ~ mask ;
if ( sch - > schib . pmcw . lpum & mask )
cio_cancel_halt_clear ( sch , & retry ) ;
break ;
case CHP_OFFLINE :
/* Path is gone */
if ( sch - > schib . pmcw . lpum & mask )
cio_cancel_halt_clear ( sch , & retry ) ;
2020-05-05 14:27:44 +02:00
vfio_ccw_queue_crw ( private , CRW_RSC_CPATH , CRW_ERC_PERRN ,
link - > chpid . id ) ;
2020-05-05 14:27:39 +02:00
break ;
case CHP_VARY_ON :
/* Path logically turned on */
sch - > opm | = mask ;
sch - > lpm | = mask ;
break ;
case CHP_ONLINE :
/* Path became available */
sch - > lpm | = mask & sch - > opm ;
2020-05-05 14:27:44 +02:00
vfio_ccw_queue_crw ( private , CRW_RSC_CPATH , CRW_ERC_INIT ,
link - > chpid . id ) ;
2020-05-05 14:27:39 +02:00
break ;
}
return 0 ;
}
2017-03-17 04:17:31 +01:00
/* Match any I/O subchannel. */
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
static struct css_driver vfio_ccw_sch_driver = {
. drv = {
. name = " vfio_ccw " ,
. owner = THIS_MODULE ,
} ,
. subchannel_type = vfio_ccw_sch_ids ,
. irq = vfio_ccw_sch_irq ,
. probe = vfio_ccw_sch_probe ,
. remove = vfio_ccw_sch_remove ,
. shutdown = vfio_ccw_sch_shutdown ,
. sch_event = vfio_ccw_sch_event ,
2020-05-05 14:27:39 +02:00
. chp_event = vfio_ccw_chp_event ,
2017-03-17 04:17:31 +01:00
} ;
2019-08-15 13:53:41 +02:00
/*
 * Register the s390 debug-feature areas used by vfio-ccw: a sprintf view
 * for formatted messages and a hex/ascii view for traces.
 *
 * Returns 0 on success or -ENOMEM when a debug area cannot be registered.
 * (Previously returned a bare -1, which propagates to userspace as -EPERM
 * from module init; -ENOMEM reflects the actual failure.)
 */
static int __init vfio_ccw_debug_init(void)
{
	vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
					       11 * sizeof(long));
	if (!vfio_ccw_debug_msg_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
	debug_set_level(vfio_ccw_debug_msg_id, 2);

	vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
	if (!vfio_ccw_debug_trace_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(vfio_ccw_debug_trace_id, 2);

	return 0;

out_unregister:
	/* debug_unregister() tolerates NULL ids, so both calls are safe. */
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
	return -ENOMEM;
}
/* Tear down both debug-feature areas (NULL ids are ignored). */
static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}
2020-05-05 14:27:38 +02:00
static void vfio_ccw_destroy_regions ( void )
{
2020-05-05 14:27:43 +02:00
kmem_cache_destroy ( vfio_ccw_crw_region ) ;
2020-05-05 14:27:41 +02:00
kmem_cache_destroy ( vfio_ccw_schib_region ) ;
2020-05-05 14:27:38 +02:00
kmem_cache_destroy ( vfio_ccw_cmd_region ) ;
kmem_cache_destroy ( vfio_ccw_io_region ) ;
}
2017-03-17 04:17:31 +01:00
/*
 * Module initialization: set up debug areas, the workqueue, the region
 * slab caches, and register the mdev and css drivers.
 *
 * Fix: on workqueue allocation failure the old error path fell through to
 * destroy_workqueue(vfio_ccw_work_q) with a NULL pointer, which
 * destroy_workqueue() does not tolerate.  A dedicated label now skips the
 * workqueue teardown in that one path.
 */
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	ret = vfio_ccw_debug_init();
	if (ret)
		return ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q) {
		ret = -ENOMEM;
		goto out_debug;
	}

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
					sizeof(struct ccw_schib_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_schib_region), NULL);
	if (!vfio_ccw_schib_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
					sizeof(struct ccw_crw_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_crw_region), NULL);
	if (!vfio_ccw_crw_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	ret = mdev_register_driver(&vfio_ccw_mdev_driver);
	if (ret)
		goto out_regions;

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_driver;
	}

	return ret;

out_driver:
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
	/* Caches not yet created are NULL and safely skipped. */
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
out_debug:
	vfio_ccw_debug_exit();
	return ret;
}
/* Module teardown: reverse of vfio_ccw_sch_init(), in reverse order. */
static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");