// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel/userspace transport abstraction for Hyper-V util driver.
 *
 * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
*/

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>

#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"

static DEFINE_SPINLOCK(hvt_list_lock);
static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
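
/* Discard any pending outgoing message and notify the owner via on_reset(). */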
static void hvt_reset(struct hvutil_transport *hvt)
{
        kfree(hvt->outmsg);
        hvt->outmsg = NULL;
        hvt->outmsg_len = 0;
        if (hvt->on_reset)
                hvt->on_reset();
}
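
/*
 * read() handler for the misc device: block until an outgoing message is
 * queued (or the transport leaves CHARDEV mode), then copy it to userspace.
 */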
static ssize_t hvt_op_read(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        struct hvutil_transport *hvt;
        int ret;

        hvt = container_of(file->f_op, struct hvutil_transport, fops);

        if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0 ||
                                     hvt->mode != HVUTIL_TRANSPORT_CHARDEV))
                return -EINTR;

        mutex_lock(&hvt->lock);

        if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
                ret = -EBADF;
                goto out_unlock;
        }

        if (!hvt->outmsg) {
                ret = -EAGAIN;
                goto out_unlock;
        }

        if (count < hvt->outmsg_len) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (!copy_to_user(buf, hvt->outmsg, hvt->outmsg_len))
                ret = hvt->outmsg_len;
        else
                ret = -EFAULT;

        kfree(hvt->outmsg);
        hvt->outmsg = NULL;
        hvt->outmsg_len = 0;

        if (hvt->on_read)
                hvt->on_read();
        hvt->on_read = NULL;

out_unlock:
        mutex_unlock(&hvt->lock);
        return ret;
}
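
/*
 * write() handler for the misc device: copy the daemon's message from
 * userspace and hand it to the util driver via on_msg().
 */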
static ssize_t hvt_op_write(struct file *file, const char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct hvutil_transport *hvt;
        u8 *inmsg;
        int ret;

        hvt = container_of(file->f_op, struct hvutil_transport, fops);

        inmsg = memdup_user(buf, count);
        if (IS_ERR(inmsg))
                return PTR_ERR(inmsg);

        if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
                ret = -EBADF;
        else
                ret = hvt->on_msg(inmsg, count);

        kfree(inmsg);

        return ret ? ret : count;
}
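
/*
 * poll() handler: signal readability when an outgoing message is queued,
 * error/hangup when the transport is being destroyed.
 */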
static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
{
        struct hvutil_transport *hvt;

        hvt = container_of(file->f_op, struct hvutil_transport, fops);

        poll_wait(file, &hvt->outmsg_q, wait);

        if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
                return EPOLLERR | EPOLLHUP;

        if (hvt->outmsg_len > 0)
                return EPOLLIN | EPOLLRDNORM;

        return 0;
}
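
/*
 * open() handler: claim the transport for the char device. Only one opener
 * is allowed; opening while in NETLINK mode issues a reset first.
 */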
static int hvt_op_open(struct inode *inode, struct file *file)
{
        struct hvutil_transport *hvt;
        int ret = 0;
        bool issue_reset = false;

        hvt = container_of(file->f_op, struct hvutil_transport, fops);

        mutex_lock(&hvt->lock);

        if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
                ret = -EBADF;
        } else if (hvt->mode == HVUTIL_TRANSPORT_INIT) {
                /*
                 * Switching to CHARDEV mode. We switch back to INIT when
                 * device gets released.
                 */
                hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
        } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
                /*
                 * We're switching from netlink communication to using char
                 * device. Issue the reset first.
                 */
                issue_reset = true;
                hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
        } else {
                ret = -EBUSY;
        }

        if (issue_reset)
                hvt_reset(hvt);

        mutex_unlock(&hvt->lock);

        return ret;
}
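
/* Final teardown: deregister the misc device and free the transport. */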
static void hvt_transport_free(struct hvutil_transport *hvt)
{
        misc_deregister(&hvt->mdev);
        kfree(hvt->outmsg);
        kfree(hvt);
}
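
/*
 * release() handler: return to INIT mode (unless a destroy is in flight),
 * drop any pending message and, if we are being destroyed, let the destroy
 * path proceed.
 */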
static int hvt_op_release(struct inode *inode, struct file *file)
{
        struct hvutil_transport *hvt;
        int mode_old;

        hvt = container_of(file->f_op, struct hvutil_transport, fops);

        mutex_lock(&hvt->lock);
        mode_old = hvt->mode;
        if (hvt->mode != HVUTIL_TRANSPORT_DESTROY)
                hvt->mode = HVUTIL_TRANSPORT_INIT;

        /*
         * Cleanup message buffers to avoid spurious messages when the daemon
         * connects back.
         */
        hvt_reset(hvt);

        if (mode_old == HVUTIL_TRANSPORT_DESTROY)
                complete(&hvt->release);

        mutex_unlock(&hvt->lock);

        return 0;
}
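
/*
 * Netlink (connector) callback: find the transport matching the message's
 * cn_id and deliver the payload to the util driver via on_msg().
 */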
static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
        struct hvutil_transport *hvt, *hvt_found = NULL;

        spin_lock(&hvt_list_lock);
        list_for_each_entry(hvt, &hvt_list, list) {
                if (hvt->cn_id.idx == msg->id.idx &&
                    hvt->cn_id.val == msg->id.val) {
                        hvt_found = hvt;
                        break;
                }
        }
        spin_unlock(&hvt_list_lock);
        if (!hvt_found) {
                pr_warn("hvt_cn_callback: spurious message received!\n");
                return;
        }

        /*
         * Switching to NETLINK mode. Switching to CHARDEV happens when someone
         * opens the device.
         */
        mutex_lock(&hvt->lock);
        if (hvt->mode == HVUTIL_TRANSPORT_INIT)
                hvt->mode = HVUTIL_TRANSPORT_NETLINK;

        if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
                hvt_found->on_msg(msg->data, msg->len);
        else
                pr_warn("hvt_cn_callback: unexpected netlink message!\n");

        mutex_unlock(&hvt->lock);
}
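
/*
 * Send a message to the userspace daemon over the currently active channel:
 * directly via netlink, or by queueing it for the char device reader. The
 * optional on_read_cb runs when the daemon has consumed the message
 * (immediately in netlink mode, where delivery is not tracked).
 */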
int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
                          void (*on_read_cb)(void))
{
        struct cn_msg *cn_msg;
        int ret = 0;

        if (hvt->mode == HVUTIL_TRANSPORT_INIT ||
            hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
                return -EINVAL;
        } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
                cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
                if (!cn_msg)
                        return -ENOMEM;
                cn_msg->id.idx = hvt->cn_id.idx;
                cn_msg->id.val = hvt->cn_id.val;
                cn_msg->len = len;
                memcpy(cn_msg->data, msg, len);
                ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
                kfree(cn_msg);
                /*
                 * We don't know when netlink messages are delivered but unlike
                 * in CHARDEV mode we're not blocked and we can send the next
                 * message right away.
                 */
                if (on_read_cb)
                        on_read_cb();
                return ret;
        }

        /* HVUTIL_TRANSPORT_CHARDEV */
        mutex_lock(&hvt->lock);
        if (hvt->mode != HVUTIL_TRANSPORT_CHARDEV) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (hvt->outmsg) {
                /* Previous message wasn't received */
                ret = -EFAULT;
                goto out_unlock;
        }
        hvt->outmsg = kzalloc(len, GFP_KERNEL);
        if (hvt->outmsg) {
                memcpy(hvt->outmsg, msg, len);
                hvt->outmsg_len = len;
                hvt->on_read = on_read_cb;
                wake_up_interruptible(&hvt->outmsg_q);
        } else
                ret = -ENOMEM;

out_unlock:
        mutex_unlock(&hvt->lock);
        return ret;
}
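
/*
 * Create a transport: register a misc char device named @name and, when
 * @cn_idx/@cn_val are non-zero, a netlink connector callback with that id.
 * @on_msg and @on_reset are the util driver's callbacks for incoming
 * messages and transport resets.
 *
 * A minimal usage sketch from a util driver's point of view; the names
 * hv_example, CN_EXAMPLE_IDX/CN_EXAMPLE_VAL and example_on_msg/
 * example_on_reset are illustrative only, not part of this file:
 *
 *	static int example_on_msg(void *msg, int len) { ... }
 *	static void example_on_reset(void) { ... }
 *
 *	hvt = hvutil_transport_init("hv_example", CN_EXAMPLE_IDX,
 *				    CN_EXAMPLE_VAL, example_on_msg,
 *				    example_on_reset);
 *	if (!hvt)
 *		return -EFAULT;
 *	...
 *	hvutil_transport_send(hvt, &msg, sizeof(msg), NULL);
 *	...
 *	hvutil_transport_destroy(hvt);
 */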
struct hvutil_transport *hvutil_transport_init(const char *name,
                                               u32 cn_idx, u32 cn_val,
                                               int (*on_msg)(void *, int),
                                               void (*on_reset)(void))
{
        struct hvutil_transport *hvt;

        hvt = kzalloc(sizeof(*hvt), GFP_KERNEL);
        if (!hvt)
                return NULL;

        hvt->cn_id.idx = cn_idx;
        hvt->cn_id.val = cn_val;

        hvt->mdev.minor = MISC_DYNAMIC_MINOR;
        hvt->mdev.name = name;

        hvt->fops.owner = THIS_MODULE;
        hvt->fops.read = hvt_op_read;
        hvt->fops.write = hvt_op_write;
        hvt->fops.poll = hvt_op_poll;
        hvt->fops.open = hvt_op_open;
        hvt->fops.release = hvt_op_release;

        hvt->mdev.fops = &hvt->fops;

        init_waitqueue_head(&hvt->outmsg_q);
        mutex_init(&hvt->lock);
        init_completion(&hvt->release);

        spin_lock(&hvt_list_lock);
        list_add(&hvt->list, &hvt_list);
        spin_unlock(&hvt_list_lock);

        hvt->on_msg = on_msg;
        hvt->on_reset = on_reset;

        if (misc_register(&hvt->mdev))
                goto err_free_hvt;

        /* Use cn_id.idx/cn_id.val to determine if we need to set up netlink */
        if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0 &&
            cn_add_callback(&hvt->cn_id, name, hvt_cn_callback))
                goto err_free_hvt;

        return hvt;

err_free_hvt:
        spin_lock(&hvt_list_lock);
        list_del(&hvt->list);
        spin_unlock(&hvt_list_lock);
        kfree(hvt);
        return NULL;
}
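
/*
 * Tear the transport down: switch to DESTROY mode, unregister the netlink
 * callback, wait for any open char device fd to be released, then free.
 */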
void hvutil_transport_destroy(struct hvutil_transport *hvt)
{
        int mode_old;

        mutex_lock(&hvt->lock);
        mode_old = hvt->mode;
        hvt->mode = HVUTIL_TRANSPORT_DESTROY;
        wake_up_interruptible(&hvt->outmsg_q);
        mutex_unlock(&hvt->lock);

        /*
         * In case we were in 'chardev' mode we still have an open fd so we
         * have to defer freeing the device. Netlink interface can be freed
         * now.
         */
        spin_lock(&hvt_list_lock);
        list_del(&hvt->list);
        spin_unlock(&hvt_list_lock);
        if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
                cn_del_callback(&hvt->cn_id);

        if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
                wait_for_completion(&hvt->release);
        hvt_transport_free(hvt);
}