2019-05-30 02:57:35 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2010-05-05 02:55:05 +04:00
/*
* Copyright ( c ) 2010 , Microsoft Corporation .
*
* Authors :
* Haiyang Zhang < haiyangz @ microsoft . com >
* Hank Janssen < hjanssen @ microsoft . com >
*/
2011-03-30 00:58:49 +04:00
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2010-05-05 02:55:05 +04:00
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/sysctl.h>
2010-05-04 19:26:23 +04:00
# include <linux/reboot.h>
2011-10-04 23:29:52 +04:00
# include <linux/hyperv.h>
2017-02-04 19:57:14 +03:00
# include <linux/clockchips.h>
# include <linux/ptp_clock_kernel.h>
2017-01-19 21:51:52 +03:00
# include <asm/mshyperv.h>
2010-05-05 02:55:05 +04:00
2014-02-16 23:34:30 +04:00
# include "hyperv_vmbus.h"
2013-07-02 21:31:30 +04:00
2013-09-06 22:49:56 +04:00
/*
 * Protocol versions for the Shutdown (SD), TimeSync (TS) and Heartbeat (HB)
 * integration services.  Each version packs the major number into the high
 * 16 bits and the minor number into the low 16 bits.
 */
#define SD_MAJOR	3
#define SD_MINOR	0
#define SD_MINOR_1	1
#define SD_MINOR_2	2
#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)

#define SD_MAJOR_1	1
#define SD_VERSION_1	(SD_MAJOR_1 << 16 | SD_MINOR)

#define TS_MAJOR	4
#define TS_MINOR	0
#define TS_VERSION	(TS_MAJOR << 16 | TS_MINOR)

#define TS_MAJOR_1	1
#define TS_VERSION_1	(TS_MAJOR_1 << 16 | TS_MINOR)

#define TS_MAJOR_3	3
#define TS_VERSION_3	(TS_MAJOR_3 << 16 | TS_MINOR)

#define HB_MAJOR	3
#define HB_MINOR	0
#define HB_VERSION	(HB_MAJOR << 16 | HB_MINOR)

#define HB_MAJOR_1	1
#define HB_VERSION_1	(HB_MAJOR_1 << 16 | HB_MINOR)

/* Versions actually negotiated with the host; filled in at negotiation time. */
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;

/* Candidate service versions offered to the host, newest first. */
#define SD_VER_COUNT 4
static const int sd_versions[] = {
	SD_VERSION_3_2,
	SD_VERSION_3_1,
	SD_VERSION,
	SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
	TS_VERSION,
	TS_VERSION_3,
	TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
	HB_VERSION,
	HB_VERSION_1
};
/* Framework (ICMSGTYPE_NEGOTIATE) versions offered to the host, newest first. */
#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};
2013-07-02 21:31:30 +04:00
2020-01-26 08:49:43 +03:00
/*
* Send the " hibernate " udev event in a thread context .
*/
struct hibernate_work_context {
struct work_struct work ;
struct hv_device * dev ;
} ;
static struct hibernate_work_context hibernate_context ;
static bool hibernation_supported ;
static void send_hibernate_uevent ( struct work_struct * work )
{
char * uevent_env [ 2 ] = { " EVENT=hibernate " , NULL } ;
struct hibernate_work_context * ctx ;
ctx = container_of ( work , struct hibernate_work_context , work ) ;
kobject_uevent_env ( & ctx - > dev - > device . kobj , KOBJ_CHANGE , uevent_env ) ;
pr_info ( " Sent hibernation uevent \n " ) ;
}
static int hv_shutdown_init ( struct hv_util_service * srv )
{
struct vmbus_channel * channel = srv - > channel ;
INIT_WORK ( & hibernate_context . work , send_hibernate_uevent ) ;
hibernate_context . dev = channel - > device_obj ;
hibernation_supported = hv_is_hibernation_supported ( ) ;
return 0 ;
}
2011-09-18 21:31:33 +04:00
static void shutdown_onchannelcallback ( void * context ) ;
static struct hv_util_service util_shutdown = {
. util_cb = shutdown_onchannelcallback ,
2020-01-26 08:49:43 +03:00
. util_init = hv_shutdown_init ,
2011-09-18 21:31:33 +04:00
} ;
2016-09-09 10:42:30 +03:00
static int hv_timesync_init ( struct hv_util_service * srv ) ;
2020-01-26 08:49:44 +03:00
static int hv_timesync_pre_suspend ( void ) ;
2016-09-09 10:42:30 +03:00
static void hv_timesync_deinit ( void ) ;
2011-09-18 21:31:33 +04:00
static void timesync_onchannelcallback ( void * context ) ;
static struct hv_util_service util_timesynch = {
. util_cb = timesync_onchannelcallback ,
2016-09-09 10:42:30 +03:00
. util_init = hv_timesync_init ,
2020-01-26 08:49:44 +03:00
. util_pre_suspend = hv_timesync_pre_suspend ,
2016-09-09 10:42:30 +03:00
. util_deinit = hv_timesync_deinit ,
2011-09-18 21:31:33 +04:00
} ;
static void heartbeat_onchannelcallback ( void * context ) ;
static struct hv_util_service util_heartbeat = {
. util_cb = heartbeat_onchannelcallback ,
} ;
static struct hv_util_service util_kvp = {
. util_cb = hv_kvp_onchannelcallback ,
. util_init = hv_kvp_init ,
2020-01-26 08:49:44 +03:00
. util_pre_suspend = hv_kvp_pre_suspend ,
. util_pre_resume = hv_kvp_pre_resume ,
2011-09-18 21:31:33 +04:00
. util_deinit = hv_kvp_deinit ,
} ;
2010-05-05 02:55:05 +04:00
2013-03-15 23:30:06 +04:00
static struct hv_util_service util_vss = {
. util_cb = hv_vss_onchannelcallback ,
. util_init = hv_vss_init ,
2020-01-26 08:49:44 +03:00
. util_pre_suspend = hv_vss_pre_suspend ,
. util_pre_resume = hv_vss_pre_resume ,
2013-03-15 23:30:06 +04:00
. util_deinit = hv_vss_deinit ,
} ;
2014-02-16 23:34:30 +04:00
static struct hv_util_service util_fcopy = {
. util_cb = hv_fcopy_onchannelcallback ,
. util_init = hv_fcopy_init ,
2020-01-26 08:49:44 +03:00
. util_pre_suspend = hv_fcopy_pre_suspend ,
. util_pre_resume = hv_fcopy_pre_resume ,
2014-02-16 23:34:30 +04:00
. util_deinit = hv_fcopy_deinit ,
} ;
2013-01-24 05:42:45 +04:00
/* Power the guest off from process context. */
static void perform_shutdown(struct work_struct *dummy)
{
	orderly_poweroff(true);
}

/* Reboot the guest from process context. */
static void perform_restart(struct work_struct *dummy)
{
	orderly_reboot();
}

/*
 * Perform the shutdown operation in a thread context.
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);

/*
 * Perform the restart operation in a thread context.
 */
static DECLARE_WORK(restart_work, perform_restart);
2010-05-05 01:31:18 +04:00
static void shutdown_onchannelcallback ( void * context )
2010-05-05 02:55:05 +04:00
{
struct vmbus_channel * channel = context ;
2020-01-26 08:49:42 +03:00
struct work_struct * work = NULL ;
2010-12-14 03:23:36 +03:00
u32 recvlen ;
2010-05-05 02:55:05 +04:00
u64 requestid ;
2011-09-18 21:31:33 +04:00
u8 * shut_txf_buf = util_shutdown . recv_buffer ;
2010-05-05 02:55:05 +04:00
struct shutdown_msg_data * shutdown_msg ;
struct icmsg_hdr * icmsghdrp ;
2020-11-09 13:07:04 +03:00
if ( vmbus_recvpacket ( channel , shut_txf_buf , HV_HYP_PAGE_SIZE , & recvlen , & requestid ) ) {
pr_err_ratelimited ( " Shutdown request received. Could not read into shut txf buf \n " ) ;
return ;
}
2010-05-05 02:55:05 +04:00
2020-11-09 13:07:04 +03:00
if ( ! recvlen )
return ;
2010-05-05 02:55:05 +04:00
2020-11-09 13:07:04 +03:00
/* Ensure recvlen is big enough to read header data */
if ( recvlen < ICMSG_HDR ) {
pr_err_ratelimited ( " Shutdown request received. Packet length too small: %d \n " ,
recvlen ) ;
return ;
}
2010-05-05 02:55:05 +04:00
2020-11-09 13:07:04 +03:00
icmsghdrp = ( struct icmsg_hdr * ) & shut_txf_buf [ sizeof ( struct vmbuspipe_hdr ) ] ;
if ( icmsghdrp - > icmsgtype = = ICMSGTYPE_NEGOTIATE ) {
if ( vmbus_prep_negotiate_resp ( icmsghdrp ,
shut_txf_buf , recvlen ,
fw_versions , FW_VER_COUNT ,
sd_versions , SD_VER_COUNT ,
NULL , & sd_srv_version ) ) {
pr_info ( " Shutdown IC version %d.%d \n " ,
sd_srv_version > > 16 ,
sd_srv_version & 0xFFFF ) ;
}
} else if ( icmsghdrp - > icmsgtype = = ICMSGTYPE_SHUTDOWN ) {
/* Ensure recvlen is big enough to contain shutdown_msg_data struct */
if ( recvlen < ICMSG_HDR + sizeof ( struct shutdown_msg_data ) ) {
pr_err_ratelimited ( " Invalid shutdown msg data. Packet length too small: %u \n " ,
recvlen ) ;
return ;
2010-05-05 02:55:05 +04:00
}
2020-11-09 13:07:04 +03:00
shutdown_msg = ( struct shutdown_msg_data * ) & shut_txf_buf [ ICMSG_HDR ] ;
/*
* shutdown_msg - > flags can be 0 ( shut down ) , 2 ( reboot ) ,
* or 4 ( hibernate ) . It may bitwise - OR 1 , which means
* performing the request by force . Linux always tries
* to perform the request by force .
*/
switch ( shutdown_msg - > flags ) {
case 0 :
case 1 :
icmsghdrp - > status = HV_S_OK ;
work = & shutdown_work ;
pr_info ( " Shutdown request received - graceful shutdown initiated \n " ) ;
break ;
case 2 :
case 3 :
icmsghdrp - > status = HV_S_OK ;
work = & restart_work ;
pr_info ( " Restart request received - graceful restart initiated \n " ) ;
break ;
case 4 :
case 5 :
pr_info ( " Hibernation request received \n " ) ;
icmsghdrp - > status = hibernation_supported ?
HV_S_OK : HV_E_FAIL ;
if ( hibernation_supported )
work = & hibernate_context . work ;
break ;
default :
icmsghdrp - > status = HV_E_FAIL ;
pr_info ( " Shutdown request received - Invalid request \n " ) ;
break ;
}
} else {
icmsghdrp - > status = HV_E_FAIL ;
pr_err_ratelimited ( " Shutdown request received. Invalid msg type: %d \n " ,
icmsghdrp - > icmsgtype ) ;
2010-05-05 02:55:05 +04:00
}
2020-11-09 13:07:04 +03:00
icmsghdrp - > icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE ;
vmbus_sendpacket ( channel , shut_txf_buf ,
recvlen , requestid ,
VM_PKT_DATA_INBAND , 0 ) ;
2020-01-26 08:49:42 +03:00
if ( work )
schedule_work ( work ) ;
2010-05-05 02:55:05 +04:00
}
2011-08-27 22:31:31 +04:00
/*
* Set the host time in a process context .
*/
2017-05-18 20:46:04 +03:00
static struct work_struct adj_time_work ;
2011-08-27 22:31:31 +04:00
2017-05-18 20:46:04 +03:00
/*
* The last time sample , received from the host . PTP device responds to
* requests by using this data and the current partition - wide time reference
* count .
*/
static struct {
u64 host_time ;
u64 ref_time ;
spinlock_t lock ;
} host_ts ;
2011-08-27 22:31:31 +04:00
2020-08-21 18:25:23 +03:00
static inline u64 reftime_to_ns ( u64 reftime )
2011-08-27 22:31:31 +04:00
{
2020-08-21 18:25:23 +03:00
return ( reftime - WLTIMEDELTA ) * 100 ;
}
/*
* Hard coded threshold for host timesync delay : 600 seconds
*/
static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * ( u64 ) NSEC_PER_SEC ;
/*
 * Compute the current host time: last host sample plus the reference-counter
 * delta since that sample.  Returns 0 on success, or -ESTALE when the last
 * host update is older than HOST_TIMESYNC_DELAY_THRESH.
 */
static int hv_get_adj_host_time(struct timespec64 *ts)
{
	u64 newtime, reftime, timediff_adj;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host_ts.lock, flags);
	reftime = hv_read_reference_counter();

	/*
	 * We need to let the caller know that last update from host
	 * is older than the max allowable threshold. clock_gettime()
	 * and PTP ioctl do not have a documented error that we could
	 * return for this specific case. Use ESTALE to report this.
	 */
	timediff_adj = reftime - host_ts.ref_time;
	if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
		pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
			     (timediff_adj * 100));
		ret = -ESTALE;
	}

	newtime = host_ts.host_time + timediff_adj;
	*ts = ns_to_timespec64(reftime_to_ns(newtime));

	spin_unlock_irqrestore(&host_ts.lock, flags);

	return ret;
}
static void hv_set_host_time ( struct work_struct * work )
{
2020-08-21 18:25:23 +03:00
struct timespec64 ts ;
if ( ! hv_get_adj_host_time ( & ts ) )
do_settimeofday64 ( & ts ) ;
2011-08-27 22:31:31 +04:00
}
2010-05-11 19:11:24 +04:00
/*
* Synchronize time with host after reboot , restore , etc .
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot , restore events of the VM .
* After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
* message after the timesync channel is opened . Since the hv_utils module is
2016-09-08 15:24:13 +03:00
* loaded after hv_vmbus , the first message is usually missed . This bit is
* considered a hard request to discipline the clock .
*
* ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host . This is
* typically used as a hint to the guest . The guest is under no obligation
* to discipline the clock .
2010-05-11 19:11:24 +04:00
*/
2017-02-04 19:57:14 +03:00
static inline void adj_guesttime ( u64 hosttime , u64 reftime , u8 adj_flags )
2010-05-11 19:11:24 +04:00
{
2017-02-04 19:57:14 +03:00
unsigned long flags ;
u64 cur_reftime ;
2010-05-11 19:11:24 +04:00
2016-09-09 10:42:30 +03:00
/*
2017-05-18 20:46:04 +03:00
* Save the adjusted time sample from the host and the snapshot
* of the current system time .
2016-09-09 10:42:30 +03:00
*/
2017-05-18 20:46:04 +03:00
spin_lock_irqsave ( & host_ts . lock , flags ) ;
2020-01-09 19:06:49 +03:00
cur_reftime = hv_read_reference_counter ( ) ;
2017-05-18 20:46:04 +03:00
host_ts . host_time = hosttime ;
host_ts . ref_time = cur_reftime ;
/*
* TimeSync v4 messages contain reference time ( guest ' s Hyper - V
* clocksource read when the time sample was generated ) , we can
* improve the precision by adding the delta between now and the
* time of generation . For older protocols we set
* reftime = = cur_reftime on call .
*/
host_ts . host_time + = ( cur_reftime - reftime ) ;
spin_unlock_irqrestore ( & host_ts . lock , flags ) ;
/* Schedule work to do do_settimeofday64() */
if ( adj_flags & ICTIMESYNCFLAG_SYNC )
schedule_work ( & adj_time_work ) ;
2010-05-05 23:23:46 +04:00
}
/*
* Time Sync Channel message handler .
*/
static void timesync_onchannelcallback ( void * context )
{
struct vmbus_channel * channel = context ;
2010-12-14 03:23:36 +03:00
u32 recvlen ;
2010-05-05 23:23:46 +04:00
u64 requestid ;
struct icmsg_hdr * icmsghdrp ;
struct ictimesync_data * timedatap ;
2016-09-08 15:24:14 +03:00
struct ictimesync_ref_data * refdata ;
2011-09-18 21:31:33 +04:00
u8 * time_txf_buf = util_timesynch . recv_buffer ;
2010-05-05 23:23:46 +04:00
2020-08-21 18:28:49 +03:00
/*
* Drain the ring buffer and use the last packet to update
* host_ts
*/
while ( 1 ) {
int ret = vmbus_recvpacket ( channel , time_txf_buf ,
HV_HYP_PAGE_SIZE , & recvlen ,
& requestid ) ;
if ( ret ) {
2020-11-09 13:07:04 +03:00
pr_err_ratelimited ( " TimeSync IC pkt recv failed (Err: %d) \n " ,
ret ) ;
2020-08-21 18:28:49 +03:00
break ;
}
if ( ! recvlen )
break ;
2010-05-05 23:23:46 +04:00
2020-11-09 13:07:04 +03:00
/* Ensure recvlen is big enough to read header data */
if ( recvlen < ICMSG_HDR ) {
pr_err_ratelimited ( " Timesync request received. Packet length too small: %d \n " ,
recvlen ) ;
break ;
}
2010-12-14 03:23:36 +03:00
icmsghdrp = ( struct icmsg_hdr * ) & time_txf_buf [
2010-05-05 23:23:46 +04:00
sizeof ( struct vmbuspipe_hdr ) ] ;
if ( icmsghdrp - > icmsgtype = = ICMSGTYPE_NEGOTIATE ) {
2020-11-09 13:07:04 +03:00
if ( vmbus_prep_negotiate_resp ( icmsghdrp ,
time_txf_buf , recvlen ,
2017-01-28 22:37:17 +03:00
fw_versions , FW_VER_COUNT ,
ts_versions , TS_VER_COUNT ,
NULL , & ts_srv_version ) ) {
2017-01-28 22:37:18 +03:00
pr_info ( " TimeSync IC version %d.%d \n " ,
2017-01-28 22:37:17 +03:00
ts_srv_version > > 16 ,
ts_srv_version & 0xFFFF ) ;
}
2020-11-09 13:07:04 +03:00
} else if ( icmsghdrp - > icmsgtype = = ICMSGTYPE_TIMESYNC ) {
2016-09-08 15:24:14 +03:00
if ( ts_srv_version > TS_VERSION_3 ) {
2020-11-09 13:07:04 +03:00
/* Ensure recvlen is big enough to read ictimesync_ref_data */
if ( recvlen < ICMSG_HDR + sizeof ( struct ictimesync_ref_data ) ) {
pr_err_ratelimited ( " Invalid ictimesync ref data. Length too small: %u \n " ,
recvlen ) ;
break ;
}
refdata = ( struct ictimesync_ref_data * ) & time_txf_buf [ ICMSG_HDR ] ;
2016-09-08 15:24:14 +03:00
adj_guesttime ( refdata - > parenttime ,
refdata - > vmreferencetime ,
refdata - > flags ) ;
} else {
2020-11-09 13:07:04 +03:00
/* Ensure recvlen is big enough to read ictimesync_data */
if ( recvlen < ICMSG_HDR + sizeof ( struct ictimesync_data ) ) {
pr_err_ratelimited ( " Invalid ictimesync data. Length too small: %u \n " ,
recvlen ) ;
break ;
}
timedatap = ( struct ictimesync_data * ) & time_txf_buf [ ICMSG_HDR ] ;
2016-09-08 15:24:14 +03:00
adj_guesttime ( timedatap - > parenttime ,
2020-01-09 19:06:49 +03:00
hv_read_reference_counter ( ) ,
2017-05-18 20:46:04 +03:00
timedatap - > flags ) ;
2016-09-08 15:24:14 +03:00
}
2020-11-09 13:07:04 +03:00
} else {
icmsghdrp - > status = HV_E_FAIL ;
pr_err_ratelimited ( " Timesync request received. Invalid msg type: %d \n " ,
icmsghdrp - > icmsgtype ) ;
2010-05-05 23:23:46 +04:00
}
icmsghdrp - > icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE ;
2010-12-14 03:23:36 +03:00
vmbus_sendpacket ( channel , time_txf_buf ,
2020-11-09 13:07:04 +03:00
recvlen , requestid ,
VM_PKT_DATA_INBAND , 0 ) ;
2010-05-05 23:23:46 +04:00
}
}
2010-05-16 01:39:58 +04:00
/*
* Heartbeat functionality .
* Every two seconds , Hyper - V send us a heartbeat request message .
* we respond to this message , and Hyper - V knows we are alive .
*/
static void heartbeat_onchannelcallback ( void * context )
{
struct vmbus_channel * channel = context ;
2010-12-14 03:23:36 +03:00
u32 recvlen ;
2010-05-16 01:39:58 +04:00
u64 requestid ;
struct icmsg_hdr * icmsghdrp ;
struct heartbeat_msg_data * heartbeat_msg ;
2011-09-18 21:31:33 +04:00
u8 * hbeat_txf_buf = util_heartbeat . recv_buffer ;
2010-05-16 01:39:58 +04:00
2016-10-06 02:57:46 +03:00
while ( 1 ) {
2020-11-09 13:07:04 +03:00
if ( vmbus_recvpacket ( channel , hbeat_txf_buf , HV_HYP_PAGE_SIZE ,
& recvlen , & requestid ) ) {
pr_err_ratelimited ( " Heartbeat request received. Could not read into hbeat txf buf \n " ) ;
return ;
}
2016-10-06 02:57:46 +03:00
if ( ! recvlen )
break ;
2010-05-16 01:39:58 +04:00
2020-11-09 13:07:04 +03:00
/* Ensure recvlen is big enough to read header data */
if ( recvlen < ICMSG_HDR ) {
2021-01-28 02:31:36 +03:00
pr_err_ratelimited ( " Heartbeat request received. Packet length too small: %d \n " ,
2020-11-09 13:07:04 +03:00
recvlen ) ;
break ;
}
2010-12-14 03:23:36 +03:00
icmsghdrp = ( struct icmsg_hdr * ) & hbeat_txf_buf [
2010-05-16 01:39:58 +04:00
sizeof ( struct vmbuspipe_hdr ) ] ;
if ( icmsghdrp - > icmsgtype = = ICMSGTYPE_NEGOTIATE ) {
2017-01-28 22:37:17 +03:00
if ( vmbus_prep_negotiate_resp ( icmsghdrp ,
2020-11-09 13:07:04 +03:00
hbeat_txf_buf , recvlen ,
2017-01-28 22:37:17 +03:00
fw_versions , FW_VER_COUNT ,
hb_versions , HB_VER_COUNT ,
NULL , & hb_srv_version ) ) {
2017-01-28 22:37:18 +03:00
pr_info ( " Heartbeat IC version %d.%d \n " ,
2017-01-28 22:37:17 +03:00
hb_srv_version > > 16 ,
hb_srv_version & 0xFFFF ) ;
}
2020-11-09 13:07:04 +03:00
} else if ( icmsghdrp - > icmsgtype = = ICMSGTYPE_HEARTBEAT ) {
/*
* Ensure recvlen is big enough to read seq_num . Reserved area is not
* included in the check as the host may not fill it up entirely
*/
if ( recvlen < ICMSG_HDR + sizeof ( u64 ) ) {
pr_err_ratelimited ( " Invalid heartbeat msg data. Length too small: %u \n " ,
recvlen ) ;
break ;
}
heartbeat_msg = ( struct heartbeat_msg_data * ) & hbeat_txf_buf [ ICMSG_HDR ] ;
2010-05-16 01:39:58 +04:00
heartbeat_msg - > seq_num + = 1 ;
2020-11-09 13:07:04 +03:00
} else {
icmsghdrp - > status = HV_E_FAIL ;
pr_err_ratelimited ( " Heartbeat request received. Invalid msg type: %d \n " ,
icmsghdrp - > icmsgtype ) ;
2010-05-16 01:39:58 +04:00
}
icmsghdrp - > icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE ;
2010-12-14 03:23:36 +03:00
vmbus_sendpacket ( channel , hbeat_txf_buf ,
2020-11-09 13:07:04 +03:00
recvlen , requestid ,
VM_PKT_DATA_INBAND , 0 ) ;
2010-05-16 01:39:58 +04:00
}
}
2010-05-05 23:23:46 +04:00
2020-09-16 06:48:16 +03:00
# define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
# define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
2011-09-13 21:59:38 +04:00
static int util_probe ( struct hv_device * dev ,
const struct hv_vmbus_device_id * dev_id )
2011-08-25 20:48:36 +04:00
{
2011-09-18 21:31:33 +04:00
struct hv_util_service * srv =
( struct hv_util_service * ) dev_id - > driver_data ;
int ret ;
2019-07-25 08:03:14 +03:00
srv - > recv_buffer = kmalloc ( HV_HYP_PAGE_SIZE * 4 , GFP_KERNEL ) ;
2011-09-18 21:31:33 +04:00
if ( ! srv - > recv_buffer )
return - ENOMEM ;
2016-02-27 02:13:19 +03:00
srv - > channel = dev - > channel ;
2011-09-18 21:31:33 +04:00
if ( srv - > util_init ) {
ret = srv - > util_init ( srv ) ;
if ( ret ) {
2011-09-18 21:31:34 +04:00
ret = - ENODEV ;
goto error1 ;
2011-09-18 21:31:33 +04:00
}
}
2012-12-01 18:46:34 +04:00
/*
* The set of services managed by the util driver are not performance
* critical and do not need batched reading . Furthermore , some services
* such as KVP can only handle one message from the host at a time .
* Turn off batched reading for all util drivers before we open the
* channel .
*/
2017-02-12 09:02:21 +03:00
set_channel_read_mode ( dev - > channel , HV_CALL_DIRECT ) ;
2012-12-01 18:46:34 +04:00
2011-09-18 21:31:33 +04:00
hv_set_drvdata ( dev , srv ) ;
2015-02-27 22:25:58 +03:00
2020-09-16 06:48:16 +03:00
ret = vmbus_open ( dev - > channel , HV_UTIL_RING_SEND_SIZE ,
HV_UTIL_RING_RECV_SIZE , NULL , 0 , srv - > util_cb ,
2019-07-25 08:03:15 +03:00
dev - > channel ) ;
2015-02-27 22:25:58 +03:00
if ( ret )
goto error ;
2011-08-25 20:48:36 +04:00
return 0 ;
2011-09-18 21:31:34 +04:00
error :
if ( srv - > util_deinit )
srv - > util_deinit ( ) ;
error1 :
kfree ( srv - > recv_buffer ) ;
return ret ;
2011-08-25 20:48:36 +04:00
}
static int util_remove ( struct hv_device * dev )
{
2011-09-18 21:31:33 +04:00
struct hv_util_service * srv = hv_get_drvdata ( dev ) ;
if ( srv - > util_deinit )
srv - > util_deinit ( ) ;
2015-02-28 22:18:20 +03:00
vmbus_close ( dev - > channel ) ;
2011-09-18 21:31:33 +04:00
kfree ( srv - > recv_buffer ) ;
2011-08-25 20:48:36 +04:00
return 0 ;
}
2020-01-26 08:49:44 +03:00
/*
* When we ' re in util_suspend ( ) , all the userspace processes have been frozen
* ( refer to hibernate ( ) - > freeze_processes ( ) ) . The userspace is thawed only
* after the whole resume procedure , including util_resume ( ) , finishes .
*/
static int util_suspend ( struct hv_device * dev )
{
struct hv_util_service * srv = hv_get_drvdata ( dev ) ;
int ret = 0 ;
if ( srv - > util_pre_suspend ) {
ret = srv - > util_pre_suspend ( ) ;
if ( ret )
return ret ;
}
vmbus_close ( dev - > channel ) ;
return 0 ;
}
static int util_resume ( struct hv_device * dev )
{
struct hv_util_service * srv = hv_get_drvdata ( dev ) ;
int ret = 0 ;
if ( srv - > util_pre_resume ) {
ret = srv - > util_pre_resume ( ) ;
if ( ret )
return ret ;
}
2020-09-16 06:48:16 +03:00
ret = vmbus_open ( dev - > channel , HV_UTIL_RING_SEND_SIZE ,
HV_UTIL_RING_RECV_SIZE , NULL , 0 , srv - > util_cb ,
2020-01-26 08:49:44 +03:00
dev - > channel ) ;
return ret ;
}
2011-08-25 20:48:36 +04:00
static const struct hv_vmbus_device_id id_table [ ] = {
2011-08-25 22:41:33 +04:00
/* Shutdown guid */
2013-01-24 05:42:41 +04:00
{ HV_SHUTDOWN_GUID ,
. driver_data = ( unsigned long ) & util_shutdown
} ,
2011-08-25 22:41:33 +04:00
/* Time synch guid */
2013-01-24 05:42:41 +04:00
{ HV_TS_GUID ,
. driver_data = ( unsigned long ) & util_timesynch
} ,
2011-08-25 22:41:33 +04:00
/* Heartbeat guid */
2013-01-24 05:42:41 +04:00
{ HV_HEART_BEAT_GUID ,
. driver_data = ( unsigned long ) & util_heartbeat
} ,
2011-08-25 22:41:33 +04:00
/* KVP guid */
2013-01-24 05:42:41 +04:00
{ HV_KVP_GUID ,
. driver_data = ( unsigned long ) & util_kvp
} ,
2013-03-15 23:30:06 +04:00
/* VSS GUID */
{ HV_VSS_GUID ,
. driver_data = ( unsigned long ) & util_vss
} ,
2014-02-16 23:34:30 +04:00
/* File copy GUID */
{ HV_FCOPY_GUID ,
. driver_data = ( unsigned long ) & util_fcopy
} ,
2011-08-25 22:41:33 +04:00
{ } ,
2011-08-25 20:48:36 +04:00
} ;
MODULE_DEVICE_TABLE ( vmbus , id_table ) ;
/* The one and only one */
static struct hv_driver util_drv = {
2018-10-18 08:09:29 +03:00
. name = " hv_utils " ,
2011-08-25 20:48:36 +04:00
. id_table = id_table ,
. probe = util_probe ,
. remove = util_remove ,
2020-01-26 08:49:44 +03:00
. suspend = util_suspend ,
. resume = util_resume ,
2018-06-05 23:37:49 +03:00
. driver = {
. probe_type = PROBE_PREFER_ASYNCHRONOUS ,
} ,
2011-08-25 20:48:36 +04:00
} ;
2017-02-04 19:57:14 +03:00
/*
 * PTP clock callbacks.  The Hyper-V reference clock is read-only from the
 * guest's point of view, so everything except gettime64 is unsupported.
 */
static int hv_ptp_enable(struct ptp_clock_info *info,
			 struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	return hv_get_adj_host_time(ts);
}

static struct ptp_clock_info ptp_hyperv_info = {
	.name		= "hyperv",
	.enable		= hv_ptp_enable,
	.adjtime	= hv_ptp_adjtime,
	.adjfreq	= hv_ptp_adjfreq,
	.gettime64	= hv_ptp_gettime,
	.settime64	= hv_ptp_settime,
	.owner		= THIS_MODULE,
};

static struct ptp_clock *hv_ptp_clock;
2016-09-09 10:42:30 +03:00
static int hv_timesync_init ( struct hv_util_service * srv )
{
2017-03-05 04:14:00 +03:00
spin_lock_init ( & host_ts . lock ) ;
2017-05-18 20:46:04 +03:00
INIT_WORK ( & adj_time_work , hv_set_host_time ) ;
2017-02-04 19:57:14 +03:00
/*
* ptp_clock_register ( ) returns NULL when CONFIG_PTP_1588_CLOCK is
* disabled but the driver is still useful without the PTP device
* as it still handles the ICTIMESYNCFLAG_SYNC case .
*/
hv_ptp_clock = ptp_clock_register ( & ptp_hyperv_info , NULL ) ;
if ( IS_ERR_OR_NULL ( hv_ptp_clock ) ) {
2021-05-14 10:01:16 +03:00
pr_err ( " cannot register PTP clock: %d \n " ,
PTR_ERR_OR_ZERO ( hv_ptp_clock ) ) ;
2017-02-04 19:57:14 +03:00
hv_ptp_clock = NULL ;
}
2016-09-09 10:42:30 +03:00
return 0 ;
}
2020-01-26 08:49:44 +03:00
static void hv_timesync_cancel_work ( void )
{
cancel_work_sync ( & adj_time_work ) ;
}
static int hv_timesync_pre_suspend ( void )
{
hv_timesync_cancel_work ( ) ;
return 0 ;
}
2016-09-09 10:42:30 +03:00
static void hv_timesync_deinit ( void )
{
2017-02-04 19:57:14 +03:00
if ( hv_ptp_clock )
ptp_clock_unregister ( hv_ptp_clock ) ;
2020-01-26 08:49:44 +03:00
hv_timesync_cancel_work ( ) ;
2016-09-09 10:42:30 +03:00
}
2010-05-05 02:55:05 +04:00
static int __init init_hyperv_utils ( void )
{
2011-03-30 00:58:49 +04:00
pr_info ( " Registering HyperV Utility Driver \n " ) ;
2010-05-05 02:55:05 +04:00
2011-09-18 21:31:34 +04:00
return vmbus_driver_register ( & util_drv ) ;
2010-05-05 02:55:05 +04:00
}
static void exit_hyperv_utils ( void )
{
2011-03-30 00:58:49 +04:00
pr_info ( " De-Registered HyperV Utility Driver \n " ) ;
2010-05-05 02:55:05 +04:00
2011-08-26 02:07:32 +04:00
vmbus_driver_unregister ( & util_drv ) ;
2010-05-05 02:55:05 +04:00
}
module_init ( init_hyperv_utils ) ;
module_exit ( exit_hyperv_utils ) ;
MODULE_DESCRIPTION ( " Hyper-V Utilities " ) ;
MODULE_LICENSE ( " GPL " ) ;