// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
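
/**
 * vfio_ccw_sch_quiesce - quiesce a subchannel
 * @sch: subchannel to quiesce
 *
 * Disable the subchannel. If the disable is refused with -EBUSY because
 * I/O is still in flight, issue cancel/halt/clear, wait for the resulting
 * interrupt, flush the workqueue and retry the disable until it succeeds
 * or the cancel/halt/clear retries are exhausted.
 *
 * Returns 0 on success or a negative error code.
 */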
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        spin_lock_irq(sch->lock);
        if (!sch->schib.pmcw.ena)
                goto out_unlock;
        ret = cio_disable_subchannel(sch);
        if (ret != -EBUSY)
                goto out_unlock;

        iretry = 255;
        do {
                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3 * HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);

out_unlock:
        private->state = VFIO_CCW_STATE_NOT_OPER;
        spin_unlock_irq(sch->lock);
        return ret;
}
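
/*
 * Deferred interrupt handling: copy the IRB that the interrupt handler
 * stashed in the private structure into the I/O region visible to
 * userspace, update the channel program state for solicited interrupts,
 * and signal the I/O eventfd so userspace picks up the new data.
 */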
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
                        cp_free(&private->cp);
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        if (private->mdev && is_final)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
}
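
/*
 * Notify userspace that queued channel report words (CRWs) are available
 * for retrieval via the CRW region.
 */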
static void vfio_ccw_crw_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;

        private = container_of(work, struct vfio_ccw_private, crw_work);

        if (!list_empty(&private->crw) && private->crw_trigger)
                eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
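
/*
 * Interrupt handler: account the I/O interrupt and hand it to the
 * device's finite-state machine, which drives the actual processing.
 */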
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
{
        if (private->crw_region)
                kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
        if (private->schib_region)
                kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
        if (private->cmd_region)
                kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        if (private->io_region)
                kmem_cache_free(vfio_ccw_io_region, private->io_region);
}
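
/*
 * Bind a vfio-ccw private structure to the subchannel: reject QDIO
 * subchannels, allocate the channel-program buffer and the userspace
 * regions, enable the subchannel and register the mediated device.
 */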
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;

        private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
                                       GFP_KERNEL);
        if (!private->cp.guest_cp)
                goto out_free;

        private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
                                               GFP_KERNEL | GFP_DMA);
        if (!private->io_region)
                goto out_free;

        private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->cmd_region)
                goto out_free;

        private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
                                                  GFP_KERNEL | GFP_DMA);
        if (!private->schib_region)
                goto out_free;

        private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->crw_region)
                goto out_free;

        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
        mutex_init(&private->io_mutex);

        spin_lock_irq(sch->lock);
        private->state = VFIO_CCW_STATE_NOT_OPER;
        sch->isc = VFIO_CCW_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        spin_unlock_irq(sch->lock);
        if (ret)
                goto out_free;

        INIT_LIST_HEAD(&private->crw);
        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;

        ret = vfio_ccw_mdev_reg(sch);
        if (ret)
                goto out_disable;

        if (dev_get_uevent_suppress(&sch->dev)) {
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }

        VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;

out_disable:
        cio_disable_subchannel(sch);
out_free:
        dev_set_drvdata(&sch->dev, NULL);
        vfio_ccw_free_regions(private);
        kfree(private->cp.guest_cp);
        kfree(private);
        return ret;
}
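
/*
 * Unbind from the subchannel: quiesce outstanding I/O, drop any queued
 * CRWs, unregister the mediated device and free all private resources.
 */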
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_crw *crw, *temp;

        vfio_ccw_sch_quiesce(sch);

        list_for_each_entry_safe(crw, temp, &private->crw, next) {
                list_del(&crw->next);
                kfree(crw);
        }

        vfio_ccw_mdev_unreg(sch);

        dev_set_drvdata(&sch->dev, NULL);

        vfio_ccw_free_regions(private);
        kfree(private->cp.guest_cp);
        kfree(private);

        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
                rc = 0;
                goto out_unlock;
        }

        private = dev_get_drvdata(&sch->dev);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
        rc = 0;

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return rc;
}
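
/*
 * Queue a channel report word (CRW) for later delivery to userspace and
 * schedule the CRW worker to signal its availability.
 */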
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
                               unsigned int rsc,
                               unsigned int erc,
                               unsigned int rsid)
{
        struct vfio_ccw_crw *crw;

        /*
         * If unable to allocate a CRW, just drop the event and
         * carry on.  The guest will either see a later one or
         * learn when it issues its own store subchannel.
         */
        crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
        if (!crw)
                return;

        /*
         * Build the CRW based on the inputs given to us.
         */
        crw->crw.rsc = rsc;
        crw->crw.erc = erc;
        crw->crw.rsid = rsid;

        list_add_tail(&crw->next, &private->crw);
        queue_work(vfio_ccw_work_q, &private->crw_work);
}
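
/*
 * React to channel-path related events: adjust the subchannel's path
 * masks for vary on/off, terminate I/O running on a path that went away,
 * and queue a CRW for userspace when a path goes offline or comes online.
 */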
static int vfio_ccw_chp_event(struct subchannel *sch,
                              struct chp_link *link, int event)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        int mask = chp_ssd_get_mask(&sch->ssd_info, link);
        int retry = 255;

        if (!private || !mask)
                return 0;

        trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
        VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
                           mdev_uuid(private->mdev), sch->schid.cssid,
                           sch->schid.ssid, sch->schid.sch_no,
                           mask, event);

        if (cio_update_schib(sch))
                return -ENODEV;

        switch (event) {
        case CHP_VARY_OFF:
                /* Path logically turned off */
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                break;
        case CHP_OFFLINE:
                /* Path is gone */
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
                                   link->chpid.id);
                break;
        case CHP_VARY_ON:
                /* Path logically turned on */
                sch->opm |= mask;
                sch->lpm |= mask;
                break;
        case CHP_ONLINE:
                /* Path became available */
                sch->lpm |= mask & sch->opm;
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
                                   link->chpid.id);
                break;
        }

        return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
        .chp_event = vfio_ccw_chp_event,
};
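
/*
 * Set up the s390 debug feature areas: a sprintf view for messages and a
 * hex/ASCII view for traces, both defaulting to debug level 2.
 */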
static int __init vfio_ccw_debug_init(void)
{
        vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
                                               11 * sizeof(long));
        if (!vfio_ccw_debug_msg_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
        debug_set_level(vfio_ccw_debug_msg_id, 2);
        vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
        if (!vfio_ccw_debug_trace_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(vfio_ccw_debug_trace_id, 2);
        return 0;

out_unregister:
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
        return -1;
}

static void vfio_ccw_debug_exit(void)
{
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
        kmem_cache_destroy(vfio_ccw_crw_region);
        kmem_cache_destroy(vfio_ccw_schib_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
}
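
/*
 * Module init: set up the debug areas, the workqueue for deferred work,
 * and the usercopy-whitelisted caches backing the I/O, command, schib and
 * CRW regions, then register as a subchannel driver on the vfio-ccw
 * interruption subclass.
 */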
static int __init vfio_ccw_sch_init(void)
{
        int ret;

        ret = vfio_ccw_debug_init();
        if (ret)
                return ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
                                        sizeof(struct ccw_io_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                        sizeof(struct ccw_cmd_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
                                        sizeof(struct ccw_schib_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_schib_region), NULL);
        if (!vfio_ccw_schib_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
                                        sizeof(struct ccw_crw_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_crw_region), NULL);
        if (!vfio_ccw_crw_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_err;
        }

        return ret;

out_err:
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");