// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
# include "vhost.h"
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * pkts.
 */
#define VHOST_TEST_PKT_WEIGHT 256
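
/* Both weights are handed to vhost_dev_init() in vhost_test_open() and
 * enforced by the vhost_exceeds_weight() check in handle_vq() below.
 */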

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
			break;
	}

	mutex_unlock(&vq->mutex);
}
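
/* Kick handler for the test virtqueue: recover the owning vhost_test
 * device from the virtqueue and run the descriptor-consuming loop.
 */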
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}
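
/* open() for /dev/vhost-test: allocate the per-open state, wire up the
 * kick handler and initialize the underlying vhost_dev with the byte and
 * packet weights defined above.
 */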
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);

	f->private_data = n;

	return 0;
}
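
/* Detach one virtqueue's backend under its mutex and return the old
 * pointer; handle_vq() bails out once the backend is NULL.
 */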
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);
	return private;
}
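
/* Stop the single test virtqueue and hand back its backend pointer. */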
static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}
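
/* Wait for all queued virtqueue work on this device to finish. */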
static void vhost_test_flush(struct vhost_test *n)
{
	vhost_dev_flush(&n->dev);
}
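
/* release(): detach the virtqueue, drain outstanding work and tear down
 * the vhost device before freeing the per-open state.
 */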
static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}
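
/* VHOST_TEST_RUN handler: test != 0 installs the device itself as a dummy
 * virtqueue backend (starting processing), test == 0 detaches it.
 */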
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* start polling new socket */
		oldpriv = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, priv);

		r = vhost_vq_init_access(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush(n);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
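
/* VHOST_RESET_OWNER handler: stop the virtqueue, flush pending work and
 * reset ownership of the underlying vhost device with a fresh iotlb.
 */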
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}
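
/* VHOST_SET_FEATURES handler: record the acked feature bits on the test
 * virtqueue; VHOST_F_LOG_ALL is only accepted if log access is set up.
 */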
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
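
/* VHOST_TEST_SET_BACKEND handler: fd == -1 stops polling and stashes the
 * current backend pointer, any other fd restores it and restarts polling.
 */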
static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
{
	static void *backend;

	const bool enable = fd != -1;
	struct vhost_virtqueue *vq;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_TEST_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	if (!enable) {
		vhost_poll_stop(&vq->poll);
		backend = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, NULL);
	} else {
		vhost_vq_set_backend(vq, backend);
		r = vhost_vq_init_access(vq);
		if (r == 0)
			r = vhost_poll_start(&vq->poll, vq->kick);
	}

	mutex_unlock(&vq->mutex);

	if (enable) {
		vhost_test_flush(n);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
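
/* ioctl() entry point. A rough sketch of the expected userspace flow,
 * assumed here for illustration (the in-tree user lives under
 * tools/virtio); ring and memory-table setup is elided:
 *
 *	int fd = open("/dev/vhost-test", O_RDWR);
 *	int run = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	... VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup ...
 *	ioctl(fd, VHOST_TEST_RUN, &run);	// start consuming descriptors
 *
 * Anything not handled here falls through to the generic vhost device and
 * vring ioctls.
 */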
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_vring_file backend;
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_TEST_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_test_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};
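
/* Registering this misc device creates the /dev/vhost-test character
 * device node (dynamic minor) backed by the fops above.
 */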
static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");