// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/9p/trans_xen
 *
 * Xen transport layer.
 *
 * Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/9pfs.h>

#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#define XEN_9PFS_NUM_RINGS 2
#define XEN_9PFS_RING_ORDER 9
#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)
/*
 * On-ring header preceding every 9p packet: total size, message id and
 * tag, immediately followed by the payload bytes on the ring.
 */
struct xen_9pfs_header {
	uint32_t size;
	uint8_t id;
	uint16_t tag;

	/* uint8_t sdata[]; */
} __attribute__((packed));
/* One per ring, more than one per 9pfs share */
struct xen_9pfs_dataring {
	struct xen_9pfs_front_priv *priv;	/* owning device */

	struct xen_9pfs_data_intf *intf;	/* shared interface page */
	grant_ref_t ref;			/* grant for the intf page */
	int evtchn;
	int irq;
	/* protect a ring from concurrent accesses */
	spinlock_t lock;

	struct xen_9pfs_data data;		/* mapped in/out data rings */
	wait_queue_head_t wq;			/* writers wait here for space */
	struct work_struct work;		/* deferred response processing */
};
/* One per 9pfs share */
struct xen_9pfs_front_priv {
	struct list_head list;			/* link on xen_9pfs_devs */
	struct xenbus_device *dev;
	char *tag;				/* mount tag from xenstore */
	struct p9_client *client;		/* attached client, or NULL */

	int num_rings;
	struct xen_9pfs_dataring *rings;
};

/* All known frontend devices; xen_9pfs_lock guards the list. */
static LIST_HEAD(xen_9pfs_devs);
static DEFINE_RWLOCK(xen_9pfs_lock);
/* We don't currently allow canceling of requests */
static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}
static int p9_xen_create ( struct p9_client * client , const char * addr , char * args )
{
2017-04-05 22:03:59 +03:00
struct xen_9pfs_front_priv * priv ;
2018-07-27 14:05:58 +03:00
if ( addr = = NULL )
return - EINVAL ;
2017-04-05 22:03:59 +03:00
read_lock ( & xen_9pfs_lock ) ;
list_for_each_entry ( priv , & xen_9pfs_devs , list ) {
if ( ! strcmp ( priv - > tag , addr ) ) {
priv - > client = client ;
read_unlock ( & xen_9pfs_lock ) ;
return 0 ;
}
}
read_unlock ( & xen_9pfs_lock ) ;
return - EINVAL ;
2017-04-05 22:03:57 +03:00
}
/* Detach @client from whichever frontend it is currently bound to. */
static void p9_xen_close(struct p9_client *client)
{
	struct xen_9pfs_front_priv *priv;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client) {
			priv->client = NULL;
			break;
		}
	}
	read_unlock(&xen_9pfs_lock);
}
/* True when the out ring has at least @size bytes of free space. */
static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
{
	RING_IDX prod, cons, queued;

	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	queued = xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring));
	return XEN_9PFS_RING_SIZE(ring) - queued >= size;
}
static int p9_xen_request ( struct p9_client * client , struct p9_req_t * p9_req )
{
2021-07-27 03:07:10 +03:00
struct xen_9pfs_front_priv * priv ;
2017-04-05 22:03:59 +03:00
RING_IDX cons , prod , masked_cons , masked_prod ;
unsigned long flags ;
2018-07-30 08:55:19 +03:00
u32 size = p9_req - > tc . size ;
2017-04-05 22:03:59 +03:00
struct xen_9pfs_dataring * ring ;
int num ;
read_lock ( & xen_9pfs_lock ) ;
list_for_each_entry ( priv , & xen_9pfs_devs , list ) {
if ( priv - > client = = client )
break ;
}
read_unlock ( & xen_9pfs_lock ) ;
2021-07-27 03:07:10 +03:00
if ( list_entry_is_head ( priv , & xen_9pfs_devs , list ) )
2017-04-05 22:03:59 +03:00
return - EINVAL ;
2018-07-30 08:55:19 +03:00
num = p9_req - > tc . tag % priv - > num_rings ;
2017-04-05 22:03:59 +03:00
ring = & priv - > rings [ num ] ;
again :
2017-09-06 17:59:08 +03:00
while ( wait_event_killable ( ring - > wq ,
p9_xen_write_todo ( ring , size ) ) ! = 0 )
2017-04-05 22:03:59 +03:00
;
spin_lock_irqsave ( & ring - > lock , flags ) ;
cons = ring - > intf - > out_cons ;
prod = ring - > intf - > out_prod ;
virt_mb ( ) ;
2020-05-21 22:32:42 +03:00
if ( XEN_9PFS_RING_SIZE ( ring ) -
xen_9pfs_queued ( prod , cons , XEN_9PFS_RING_SIZE ( ring ) ) < size ) {
2017-04-05 22:03:59 +03:00
spin_unlock_irqrestore ( & ring - > lock , flags ) ;
goto again ;
}
2020-05-21 22:32:42 +03:00
masked_prod = xen_9pfs_mask ( prod , XEN_9PFS_RING_SIZE ( ring ) ) ;
masked_cons = xen_9pfs_mask ( cons , XEN_9PFS_RING_SIZE ( ring ) ) ;
2017-04-05 22:03:59 +03:00
2018-07-30 08:55:19 +03:00
xen_9pfs_write_packet ( ring - > data . out , p9_req - > tc . sdata , size ,
2020-05-21 22:32:42 +03:00
& masked_prod , masked_cons ,
XEN_9PFS_RING_SIZE ( ring ) ) ;
2017-04-05 22:03:59 +03:00
2022-12-05 15:39:01 +03:00
WRITE_ONCE ( p9_req - > status , REQ_STATUS_SENT ) ;
2017-04-05 22:03:59 +03:00
virt_wmb ( ) ; /* write ring before updating pointer */
prod + = size ;
ring - > intf - > out_prod = prod ;
spin_unlock_irqrestore ( & ring - > lock , flags ) ;
notify_remote_via_irq ( ring - > irq ) ;
2022-07-04 04:08:18 +03:00
p9_req_put ( client , p9_req ) ;
2017-04-05 22:03:59 +03:00
2017-04-05 22:03:57 +03:00
return 0 ;
}
2017-04-05 22:03:58 +03:00
static void p9_xen_response ( struct work_struct * work )
{
2017-04-05 22:04:00 +03:00
struct xen_9pfs_front_priv * priv ;
struct xen_9pfs_dataring * ring ;
RING_IDX cons , prod , masked_cons , masked_prod ;
struct xen_9pfs_header h ;
struct p9_req_t * req ;
int status ;
ring = container_of ( work , struct xen_9pfs_dataring , work ) ;
priv = ring - > priv ;
while ( 1 ) {
cons = ring - > intf - > in_cons ;
prod = ring - > intf - > in_prod ;
virt_rmb ( ) ;
2020-05-21 22:32:42 +03:00
if ( xen_9pfs_queued ( prod , cons , XEN_9PFS_RING_SIZE ( ring ) ) <
2017-04-05 22:04:00 +03:00
sizeof ( h ) ) {
notify_remote_via_irq ( ring - > irq ) ;
return ;
}
2020-05-21 22:32:42 +03:00
masked_prod = xen_9pfs_mask ( prod , XEN_9PFS_RING_SIZE ( ring ) ) ;
masked_cons = xen_9pfs_mask ( cons , XEN_9PFS_RING_SIZE ( ring ) ) ;
2017-04-05 22:04:00 +03:00
/* First, read just the header */
xen_9pfs_read_packet ( & h , ring - > data . in , sizeof ( h ) ,
masked_prod , & masked_cons ,
2020-05-21 22:32:42 +03:00
XEN_9PFS_RING_SIZE ( ring ) ) ;
2017-04-05 22:04:00 +03:00
req = p9_tag_lookup ( priv - > client , h . tag ) ;
if ( ! req | | req - > status ! = REQ_STATUS_SENT ) {
dev_warn ( & priv - > dev - > dev , " Wrong req tag=%x \n " , h . tag ) ;
cons + = h . size ;
virt_mb ( ) ;
ring - > intf - > in_cons = cons ;
continue ;
}
2022-11-18 16:44:41 +03:00
if ( h . size > req - > rc . capacity ) {
dev_warn ( & priv - > dev - > dev ,
" requested packet size too big: %d for tag %d with capacity %zd \n " ,
h . size , h . tag , req - > rc . capacity ) ;
2022-12-05 15:39:01 +03:00
WRITE_ONCE ( req - > status , REQ_STATUS_ERROR ) ;
2022-11-18 16:44:41 +03:00
goto recv_error ;
}
2022-11-22 03:06:56 +03:00
req - > rc . size = h . size ;
req - > rc . id = h . id ;
req - > rc . tag = h . tag ;
2018-07-30 08:55:19 +03:00
req - > rc . offset = 0 ;
2017-04-05 22:04:00 +03:00
2020-05-21 22:32:42 +03:00
masked_cons = xen_9pfs_mask ( cons , XEN_9PFS_RING_SIZE ( ring ) ) ;
2017-04-05 22:04:00 +03:00
/* Then, read the whole packet (including the header) */
2018-07-30 08:55:19 +03:00
xen_9pfs_read_packet ( req - > rc . sdata , ring - > data . in , h . size ,
2017-04-05 22:04:00 +03:00
masked_prod , & masked_cons ,
2020-05-21 22:32:42 +03:00
XEN_9PFS_RING_SIZE ( ring ) ) ;
2017-04-05 22:04:00 +03:00
2022-11-18 16:44:41 +03:00
recv_error :
2017-04-05 22:04:00 +03:00
virt_mb ( ) ;
cons + = h . size ;
ring - > intf - > in_cons = cons ;
status = ( req - > status ! = REQ_STATUS_ERROR ) ?
REQ_STATUS_RCVD : REQ_STATUS_ERROR ;
p9_client_cb ( priv - > client , req , status ) ;
}
2017-04-05 22:03:58 +03:00
}
static irqreturn_t xen_9pfs_front_event_handler ( int irq , void * r )
{
struct xen_9pfs_dataring * ring = r ;
if ( ! ring | | ! ring - > priv - > client ) {
/* ignore spurious interrupt */
return IRQ_HANDLED ;
}
wake_up_interruptible ( & ring - > wq ) ;
schedule_work ( & ring - > work ) ;
return IRQ_HANDLED ;
}
2017-04-05 22:03:57 +03:00
static struct p9_trans_module p9_xen_trans = {
. name = " xen " ,
2020-05-21 22:32:42 +03:00
. maxsize = 1 < < ( XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2 ) ,
2022-07-16 00:33:09 +03:00
. pooled_rbuffers = false ,
2017-04-05 22:03:57 +03:00
. def = 1 ,
. create = p9_xen_create ,
. close = p9_xen_close ,
. request = p9_xen_request ,
. cancel = p9_xen_cancel ,
. owner = THIS_MODULE ,
} ;
static const struct xenbus_device_id xen_9pfs_front_ids [ ] = {
{ " 9pfs " } ,
{ " " }
} ;
2017-04-05 22:03:58 +03:00
static void xen_9pfs_front_free ( struct xen_9pfs_front_priv * priv )
{
int i , j ;
write_lock ( & xen_9pfs_lock ) ;
list_del ( & priv - > list ) ;
write_unlock ( & xen_9pfs_lock ) ;
for ( i = 0 ; i < priv - > num_rings ; i + + ) {
if ( ! priv - > rings [ i ] . intf )
break ;
if ( priv - > rings [ i ] . irq > 0 )
unbind_from_irqhandler ( priv - > rings [ i ] . irq , priv - > dev ) ;
if ( priv - > rings [ i ] . data . in ) {
2020-05-21 22:32:42 +03:00
for ( j = 0 ;
j < ( 1 < < priv - > rings [ i ] . intf - > ring_order ) ;
j + + ) {
2017-04-05 22:03:58 +03:00
grant_ref_t ref ;
ref = priv - > rings [ i ] . intf - > ref [ j ] ;
2022-05-24 14:46:30 +03:00
gnttab_end_foreign_access ( ref , NULL ) ;
2017-04-05 22:03:58 +03:00
}
2022-03-07 11:48:55 +03:00
free_pages_exact ( priv - > rings [ i ] . data . in ,
1UL < < ( priv - > rings [ i ] . intf - > ring_order +
XEN_PAGE_SHIFT ) ) ;
2017-04-05 22:03:58 +03:00
}
2022-05-24 14:46:30 +03:00
gnttab_end_foreign_access ( priv - > rings [ i ] . ref , NULL ) ;
2017-04-05 22:03:58 +03:00
free_page ( ( unsigned long ) priv - > rings [ i ] . intf ) ;
}
kfree ( priv - > rings ) ;
kfree ( priv - > tag ) ;
kfree ( priv ) ;
}
2017-04-05 22:03:57 +03:00
static int xen_9pfs_front_remove ( struct xenbus_device * dev )
{
2017-04-05 22:03:58 +03:00
struct xen_9pfs_front_priv * priv = dev_get_drvdata ( & dev - > dev ) ;
dev_set_drvdata ( & dev - > dev , NULL ) ;
xen_9pfs_front_free ( priv ) ;
2017-04-05 22:03:57 +03:00
return 0 ;
}
2017-04-05 22:03:58 +03:00
static int xen_9pfs_front_alloc_dataring ( struct xenbus_device * dev ,
2020-05-21 22:32:42 +03:00
struct xen_9pfs_dataring * ring ,
unsigned int order )
2017-04-05 22:03:58 +03:00
{
int i = 0 ;
int ret = - ENOMEM ;
void * bytes = NULL ;
init_waitqueue_head ( & ring - > wq ) ;
spin_lock_init ( & ring - > lock ) ;
INIT_WORK ( & ring - > work , p9_xen_response ) ;
ring - > intf = ( struct xen_9pfs_data_intf * ) get_zeroed_page ( GFP_KERNEL ) ;
if ( ! ring - > intf )
return ret ;
ret = gnttab_grant_foreign_access ( dev - > otherend_id ,
virt_to_gfn ( ring - > intf ) , 0 ) ;
if ( ret < 0 )
goto out ;
ring - > ref = ret ;
2022-03-07 11:48:55 +03:00
bytes = alloc_pages_exact ( 1UL < < ( order + XEN_PAGE_SHIFT ) ,
GFP_KERNEL | __GFP_ZERO ) ;
2017-04-05 22:03:58 +03:00
if ( ! bytes ) {
ret = - ENOMEM ;
goto out ;
}
2020-05-21 22:32:42 +03:00
for ( ; i < ( 1 < < order ) ; i + + ) {
2017-04-05 22:03:58 +03:00
ret = gnttab_grant_foreign_access (
dev - > otherend_id , virt_to_gfn ( bytes ) + i , 0 ) ;
if ( ret < 0 )
goto out ;
ring - > intf - > ref [ i ] = ret ;
}
2020-05-21 22:32:42 +03:00
ring - > intf - > ring_order = order ;
2017-04-05 22:03:58 +03:00
ring - > data . in = bytes ;
2020-05-21 22:32:42 +03:00
ring - > data . out = bytes + XEN_FLEX_RING_SIZE ( order ) ;
2017-04-05 22:03:58 +03:00
ret = xenbus_alloc_evtchn ( dev , & ring - > evtchn ) ;
if ( ret )
goto out ;
ring - > irq = bind_evtchn_to_irqhandler ( ring - > evtchn ,
xen_9pfs_front_event_handler ,
0 , " xen_9pfs-frontend " , ring ) ;
if ( ring - > irq > = 0 )
return 0 ;
xenbus_free_evtchn ( dev , ring - > evtchn ) ;
ret = ring - > irq ;
out :
if ( bytes ) {
for ( i - - ; i > = 0 ; i - - )
2022-05-24 14:46:30 +03:00
gnttab_end_foreign_access ( ring - > intf - > ref [ i ] , NULL ) ;
2022-03-07 11:48:55 +03:00
free_pages_exact ( bytes , 1UL < < ( order + XEN_PAGE_SHIFT ) ) ;
2017-04-05 22:03:58 +03:00
}
2022-05-24 14:46:30 +03:00
gnttab_end_foreign_access ( ring - > ref , NULL ) ;
2017-04-05 22:03:58 +03:00
free_page ( ( unsigned long ) ring - > intf ) ;
return ret ;
}
2017-04-05 22:03:57 +03:00
static int xen_9pfs_front_probe ( struct xenbus_device * dev ,
const struct xenbus_device_id * id )
{
2017-04-05 22:03:58 +03:00
int ret , i ;
struct xenbus_transaction xbt ;
struct xen_9pfs_front_priv * priv = NULL ;
char * versions ;
2017-04-13 20:57:56 +03:00
unsigned int max_rings , max_ring_order , len = 0 ;
2017-04-05 22:03:58 +03:00
versions = xenbus_read ( XBT_NIL , dev - > otherend , " versions " , & len ) ;
2018-08-14 05:43:48 +03:00
if ( IS_ERR ( versions ) )
return PTR_ERR ( versions ) ;
2017-04-05 22:03:58 +03:00
if ( strcmp ( versions , " 1 " ) ) {
kfree ( versions ) ;
return - EINVAL ;
}
kfree ( versions ) ;
max_rings = xenbus_read_unsigned ( dev - > otherend , " max-rings " , 0 ) ;
if ( max_rings < XEN_9PFS_NUM_RINGS )
return - EINVAL ;
max_ring_order = xenbus_read_unsigned ( dev - > otherend ,
" max-ring-page-order " , 0 ) ;
2020-05-21 22:32:42 +03:00
if ( max_ring_order > XEN_9PFS_RING_ORDER )
max_ring_order = XEN_9PFS_RING_ORDER ;
if ( p9_xen_trans . maxsize > XEN_FLEX_RING_SIZE ( max_ring_order ) )
p9_xen_trans . maxsize = XEN_FLEX_RING_SIZE ( max_ring_order ) / 2 ;
2017-04-05 22:03:58 +03:00
priv = kzalloc ( sizeof ( * priv ) , GFP_KERNEL ) ;
if ( ! priv )
return - ENOMEM ;
priv - > dev = dev ;
priv - > num_rings = XEN_9PFS_NUM_RINGS ;
priv - > rings = kcalloc ( priv - > num_rings , sizeof ( * priv - > rings ) ,
GFP_KERNEL ) ;
if ( ! priv - > rings ) {
kfree ( priv ) ;
return - ENOMEM ;
}
for ( i = 0 ; i < priv - > num_rings ; i + + ) {
priv - > rings [ i ] . priv = priv ;
2020-05-21 22:32:42 +03:00
ret = xen_9pfs_front_alloc_dataring ( dev , & priv - > rings [ i ] ,
max_ring_order ) ;
2017-04-05 22:03:58 +03:00
if ( ret < 0 )
goto error ;
}
again :
ret = xenbus_transaction_start ( & xbt ) ;
if ( ret ) {
xenbus_dev_fatal ( dev , ret , " starting transaction " ) ;
goto error ;
}
ret = xenbus_printf ( xbt , dev - > nodename , " version " , " %u " , 1 ) ;
if ( ret )
goto error_xenbus ;
ret = xenbus_printf ( xbt , dev - > nodename , " num-rings " , " %u " ,
priv - > num_rings ) ;
if ( ret )
goto error_xenbus ;
for ( i = 0 ; i < priv - > num_rings ; i + + ) {
char str [ 16 ] ;
BUILD_BUG_ON ( XEN_9PFS_NUM_RINGS > 9 ) ;
2020-10-09 11:05:52 +03:00
sprintf ( str , " ring-ref%d " , i ) ;
2017-04-05 22:03:58 +03:00
ret = xenbus_printf ( xbt , dev - > nodename , str , " %d " ,
priv - > rings [ i ] . ref ) ;
if ( ret )
goto error_xenbus ;
2020-10-09 11:05:52 +03:00
sprintf ( str , " event-channel-%d " , i ) ;
2017-04-05 22:03:58 +03:00
ret = xenbus_printf ( xbt , dev - > nodename , str , " %u " ,
priv - > rings [ i ] . evtchn ) ;
if ( ret )
goto error_xenbus ;
}
priv - > tag = xenbus_read ( xbt , dev - > nodename , " tag " , NULL ) ;
2017-05-16 17:22:47 +03:00
if ( IS_ERR ( priv - > tag ) ) {
ret = PTR_ERR ( priv - > tag ) ;
2017-04-05 22:03:58 +03:00
goto error_xenbus ;
}
ret = xenbus_transaction_end ( xbt , 0 ) ;
if ( ret ) {
if ( ret = = - EAGAIN )
goto again ;
xenbus_dev_fatal ( dev , ret , " completing transaction " ) ;
goto error ;
}
write_lock ( & xen_9pfs_lock ) ;
list_add_tail ( & priv - > list , & xen_9pfs_devs ) ;
write_unlock ( & xen_9pfs_lock ) ;
dev_set_drvdata ( & dev - > dev , priv ) ;
xenbus_switch_state ( dev , XenbusStateInitialised ) ;
2017-04-05 22:03:57 +03:00
return 0 ;
2017-04-05 22:03:58 +03:00
error_xenbus :
xenbus_transaction_end ( xbt , 1 ) ;
xenbus_dev_fatal ( dev , ret , " writing xenstore " ) ;
error :
dev_set_drvdata ( & dev - > dev , NULL ) ;
xen_9pfs_front_free ( priv ) ;
return ret ;
2017-04-05 22:03:57 +03:00
}
static int xen_9pfs_front_resume ( struct xenbus_device * dev )
{
2018-05-09 12:48:33 +03:00
dev_warn ( & dev - > dev , " suspend/resume unsupported \n " ) ;
2017-04-05 22:03:57 +03:00
return 0 ;
}
static void xen_9pfs_front_changed ( struct xenbus_device * dev ,
enum xenbus_state backend_state )
{
2017-04-05 22:03:58 +03:00
switch ( backend_state ) {
case XenbusStateReconfiguring :
case XenbusStateReconfigured :
case XenbusStateInitialising :
case XenbusStateInitialised :
case XenbusStateUnknown :
break ;
case XenbusStateInitWait :
break ;
case XenbusStateConnected :
xenbus_switch_state ( dev , XenbusStateConnected ) ;
break ;
case XenbusStateClosed :
if ( dev - > state = = XenbusStateClosed )
break ;
2020-08-24 01:36:59 +03:00
fallthrough ; /* Missed the backend's CLOSING state */
2017-04-05 22:03:58 +03:00
case XenbusStateClosing :
xenbus_frontend_closed ( dev ) ;
break ;
}
2017-04-05 22:03:57 +03:00
}
static struct xenbus_driver xen_9pfs_front_driver = {
	.ids = xen_9pfs_front_ids,
	.probe = xen_9pfs_front_probe,
	.remove = xen_9pfs_front_remove,
	.resume = xen_9pfs_front_resume,
	.otherend_changed = xen_9pfs_front_changed,
};
2022-09-09 13:35:46 +03:00
static int __init p9_trans_xen_init ( void )
2017-04-05 22:03:57 +03:00
{
2019-04-30 17:39:33 +03:00
int rc ;
2017-04-05 22:03:57 +03:00
if ( ! xen_domain ( ) )
return - ENODEV ;
pr_info ( " Initialising Xen transport for 9pfs \n " ) ;
v9fs_register_trans ( & p9_xen_trans ) ;
2019-04-30 17:39:33 +03:00
rc = xenbus_register_frontend ( & xen_9pfs_front_driver ) ;
if ( rc )
v9fs_unregister_trans ( & p9_xen_trans ) ;
return rc ;
2017-04-05 22:03:57 +03:00
}
module_init ( p9_trans_xen_init ) ;
2021-10-17 16:46:11 +03:00
MODULE_ALIAS_9P ( " xen " ) ;
2017-04-05 22:03:57 +03:00
2022-09-09 13:35:46 +03:00
static void __exit p9_trans_xen_exit ( void )
2017-04-05 22:03:57 +03:00
{
v9fs_unregister_trans ( & p9_xen_trans ) ;
return xenbus_unregister_driver ( & xen_9pfs_front_driver ) ;
}
module_exit ( p9_trans_xen_exit ) ;
2018-01-08 19:23:18 +03:00
2021-11-03 22:38:22 +03:00
MODULE_ALIAS ( " xen:9pfs " ) ;
2018-01-08 19:23:18 +03:00
MODULE_AUTHOR ( " Stefano Stabellini <stefano@aporeto.com> " ) ;
MODULE_DESCRIPTION ( " Xen Transport for 9P " ) ;
MODULE_LICENSE ( " GPL " ) ;