// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 */

/*
 * Hyper-V Synthetic Video Frame Buffer Driver
 *
 * This is the driver for the Hyper-V Synthetic Video, which supports
 * screen resolution up to Full HD 1920x1080 with 32 bit color on Windows
 * Server 2012, and 1600x1200 with 16 bit color on Windows Server 2008 R2
 * or earlier.
 *
 * It also solves the double mouse cursor issue of the emulated video mode.
 *
 * The default screen resolution is 1152x864, which may be changed by a
 * kernel parameter:
 *     video=hyperv_fb:<width>x<height>
 *     For example: video=hyperv_fb:1280x1024
 *
 * Portrait orientation is also supported:
 *     For example: video=hyperv_fb:864x1152
 *
 * When a Windows 10 RS5+ host is used, the virtual machine screen
 * resolution is obtained from the host. The "video=hyperv_fb" option is
 * not needed, but still can be used to overwrite what the host specifies.
 * The VM resolution on the host could be set by executing the powershell
 * "set-vmvideo" command. For example:
 *     set-vmvideo -vmname name -horizontalresolution:1920 \
 * -verticalresolution:1200 -resolutiontype single
 *
 * Gen 1 VMs also support directly using the VM's physical memory for the
 * framebuffer, which can improve framebuffer and VM performance. This
 * requires allocating contiguous physical memory from the Linux kernel's
 * CMA allocator. To enable it, supply a kernel parameter that reserves
 * enough CMA memory for the framebuffer. For example:
 *    cma=130m
 * This gives 130 MB to the CMA allocator that can be allocated to the
 * framebuffer. For reference, an 8K resolution (7680x4320) takes about
 * 127 MB of memory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/panic_notifier.h>
#include <linux/efi.h>
#include <linux/console.h>

#include <linux/hyperv.h>

/* Hyper-V Synthetic Video Protocol definitions and structures */
#define MAX_VMBUS_PKT_SIZE 0x4000

#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)

#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
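
/*
 * For example, SYNTHVID_VERSION(3, 5) encodes as 0x00050003: the minor
 * version lives in the upper 16 bits and the major version in the lower
 * 16 bits, which is what the GET_MAJOR/GET_MINOR helpers above unpack.
 */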

#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32

#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
#define SYNTHVID_WIDTH_MAX_WIN7 1600
#define SYNTHVID_HEIGHT_MAX_WIN7 1200

#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)

#define PCI_VENDOR_ID_MICROSOFT 0x1414
#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
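
/*
 * On Generation 1 VMs the synthetic video device comes with an emulated
 * PCI video device (1414:5353); hvfb_getmem() below locates the
 * framebuffer region through its BAR 0, and a minimal PCI stub driver at
 * the end of this file also claims this ID.
 */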

enum pipe_msg_type {
	PIPE_MSG_INVALID,
	PIPE_MSG_DATA,
	PIPE_MSG_MAX
};

struct pipe_msg_hdr {
	u32 type;
	u32 size; /* size of message after this field */
} __packed;

enum synthvid_msg_type {
	SYNTHVID_ERROR			= 0,
	SYNTHVID_VERSION_REQUEST	= 1,
	SYNTHVID_VERSION_RESPONSE	= 2,
	SYNTHVID_VRAM_LOCATION		= 3,
	SYNTHVID_VRAM_LOCATION_ACK	= 4,
	SYNTHVID_SITUATION_UPDATE	= 5,
	SYNTHVID_SITUATION_UPDATE_ACK	= 6,
	SYNTHVID_POINTER_POSITION	= 7,
	SYNTHVID_POINTER_SHAPE		= 8,
	SYNTHVID_FEATURE_CHANGE		= 9,
	SYNTHVID_DIRT			= 10,
	SYNTHVID_RESOLUTION_REQUEST	= 13,
	SYNTHVID_RESOLUTION_RESPONSE	= 14,

	SYNTHVID_MAX			= 15
};

#define SYNTHVID_EDID_BLOCK_SIZE	128
#define SYNTHVID_MAX_RESOLUTION_COUNT	64

struct hvd_screen_info {
	u16 width;
	u16 height;
} __packed;

struct synthvid_msg_hdr {
	u32 type;
	u32 size; /* size of this header + payload after this field */
} __packed;

struct synthvid_version_req {
	u32 version;
} __packed;

struct synthvid_version_resp {
	u32 version;
	u8 is_accepted;
	u8 max_video_outputs;
} __packed;

struct synthvid_supported_resolution_req {
	u8 maximum_resolution_count;
} __packed;

struct synthvid_supported_resolution_resp {
	u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
	u8 resolution_count;
	u8 default_resolution_index;
	u8 is_standard;
	struct hvd_screen_info
		supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
} __packed;

struct synthvid_vram_location {
	u64 user_ctx;
	u8 is_vram_gpa_specified;
	u64 vram_gpa;
} __packed;

struct synthvid_vram_location_ack {
	u64 user_ctx;
} __packed;

struct video_output_situation {
	u8 active;
	u32 vram_offset;
	u8 depth_bits;
	u32 width_pixels;
	u32 height_pixels;
	u32 pitch_bytes;
} __packed;

struct synthvid_situation_update {
	u64 user_ctx;
	u8 video_output_count;
	struct video_output_situation video_output[1];
} __packed;

struct synthvid_situation_update_ack {
	u64 user_ctx;
} __packed;

struct synthvid_pointer_position {
	u8 is_visible;
	u8 video_output;
	s32 image_x;
	s32 image_y;
} __packed;


#define CURSOR_MAX_X 96
#define CURSOR_MAX_Y 96
#define CURSOR_ARGB_PIXEL_SIZE 4
#define CURSOR_MAX_SIZE (CURSOR_MAX_X * CURSOR_MAX_Y * CURSOR_ARGB_PIXEL_SIZE)
#define CURSOR_COMPLETE (-1)
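
/*
 * A complete ARGB pointer shape can carry up to CURSOR_MAX_X * CURSOR_MAX_Y *
 * CURSOR_ARGB_PIXEL_SIZE = 36864 bytes of pixel data. This driver only ever
 * sends a minimal 1x1 shape with part_idx set to CURSOR_COMPLETE (see
 * synthvid_send_ptr()), presumably to address the double mouse cursor issue
 * mentioned at the top of this file.
 */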

struct synthvid_pointer_shape {
	u8 part_idx;
	u8 is_argb;
	u32 width; /* CURSOR_MAX_X at most */
	u32 height; /* CURSOR_MAX_Y at most */
	u32 hot_x; /* hotspot relative to upper-left of pointer image */
	u32 hot_y;
	u8 data[4];
} __packed;

struct synthvid_feature_change {
	u8 is_dirt_needed;
	u8 is_ptr_pos_needed;
	u8 is_ptr_shape_needed;
	u8 is_situ_needed;
} __packed;

struct rect {
	s32 x1, y1; /* top left corner */
	s32 x2, y2; /* bottom right corner, exclusive */
} __packed;

struct synthvid_dirt {
	u8 video_output;
	u8 dirt_count;
	struct rect rect[1];
} __packed;

struct synthvid_msg {
	struct pipe_msg_hdr pipe_hdr;
	struct synthvid_msg_hdr vid_hdr;

	union {
		struct synthvid_version_req ver_req;
		struct synthvid_version_resp ver_resp;
		struct synthvid_vram_location vram;
		struct synthvid_vram_location_ack vram_ack;
		struct synthvid_situation_update situ;
		struct synthvid_situation_update_ack situ_ack;
		struct synthvid_pointer_position ptr_pos;
		struct synthvid_pointer_shape ptr_shape;
		struct synthvid_feature_change feature_chg;
		struct synthvid_dirt dirt;
		struct synthvid_supported_resolution_req resolution_req;
		struct synthvid_supported_resolution_resp resolution_resp;
	};
} __packed;
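
/*
 * Every packet exchanged with the host is laid out as
 *   struct pipe_msg_hdr | struct synthvid_msg_hdr | payload
 * where pipe_hdr.size mirrors vid_hdr.size and synthvid_send() transmits
 * vid_hdr.size + sizeof(struct pipe_msg_hdr) bytes in total.
 */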


/* FB driver definitions and structures */
#define HVFB_WIDTH 1152 /* default screen width */
#define HVFB_HEIGHT 864 /* default screen height */
#define HVFB_WIDTH_MIN 640
#define HVFB_HEIGHT_MIN 480

#define RING_BUFSIZE (256 * 1024)
#define VSP_TIMEOUT (10 * HZ)
#define HVFB_UPDATE_DELAY (HZ / 20)
#define HVFB_ONDEMAND_THROTTLE (HZ / 20)
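
/*
 * HVFB_UPDATE_DELAY and HVFB_ONDEMAND_THROTTLE are both HZ / 20 jiffies,
 * i.e. 50 ms, so dirty-rectangle updates reach the host at most about
 * 20 times per second.
 */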

struct hvfb_par {
	struct fb_info *info;
	struct resource *mem;
	bool fb_ready; /* fb device is ready */
	struct completion wait;
	u32 synthvid_version;

	struct delayed_work dwork;
	bool update;
	bool update_saved; /* The value of 'update' before hibernation */

	u32 pseudo_palette[16];
	u8 init_buf[MAX_VMBUS_PKT_SIZE];
	u8 recv_buf[MAX_VMBUS_PKT_SIZE];

	/* If true, the VSC notifies the VSP on every framebuffer change */
	bool synchronous_fb;

	/* If true, need to copy from deferred IO mem to framebuffer mem */
	bool need_docopy;

	struct notifier_block hvfb_panic_nb;

	/* Memory for deferred IO and frame buffer itself */
	unsigned char *dio_vp;
	unsigned char *mmio_vp;
	phys_addr_t mmio_pp;

	/* Dirty rectangle, protected by delayed_refresh_lock */
	int x1, y1, x2, y2;
	bool delayed_refresh;
	spinlock_t delayed_refresh_lock;
};

static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
static uint screen_width_max = HVFB_WIDTH;
static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */


/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
				struct synthvid_msg *msg)
{
	static atomic64_t request_id = ATOMIC64_INIT(0);
	int ret;

	msg->pipe_hdr.type = PIPE_MSG_DATA;
	msg->pipe_hdr.size = msg->vid_hdr.size;

	ret = vmbus_sendpacket(hdev->channel, msg,
			       msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
			       atomic64_inc_return(&request_id),
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		pr_err_ratelimited("Unable to send packet via vmbus; error %d\n", ret);

	return ret;
}


/* Send screen resolution info to host */
static int synthvid_send_situ(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct synthvid_msg msg;

	if (!info)
		return -ENODEV;

	memset(&msg, 0, sizeof(struct synthvid_msg));

	msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_situation_update);
	msg.situ.user_ctx = 0;
	msg.situ.video_output_count = 1;
	msg.situ.video_output[0].active = 1;
	msg.situ.video_output[0].vram_offset = 0;
	msg.situ.video_output[0].depth_bits = info->var.bits_per_pixel;
	msg.situ.video_output[0].width_pixels = info->var.xres;
	msg.situ.video_output[0].height_pixels = info->var.yres;
	msg.situ.video_output[0].pitch_bytes = info->fix.line_length;

	synthvid_send(hdev, &msg);

	return 0;
}

/* Send mouse pointer info to host */
static int synthvid_send_ptr(struct hv_device *hdev)
{
	struct synthvid_msg msg;

	memset(&msg, 0, sizeof(struct synthvid_msg));
	msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_pointer_position);
	msg.ptr_pos.is_visible = 1;
	msg.ptr_pos.video_output = 0;
	msg.ptr_pos.image_x = 0;
	msg.ptr_pos.image_y = 0;
	synthvid_send(hdev, &msg);

	memset(&msg, 0, sizeof(struct synthvid_msg));
	msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_pointer_shape);
	msg.ptr_shape.part_idx = CURSOR_COMPLETE;
	msg.ptr_shape.is_argb = 1;
	msg.ptr_shape.width = 1;
	msg.ptr_shape.height = 1;
	msg.ptr_shape.hot_x = 0;
	msg.ptr_shape.hot_y = 0;
	msg.ptr_shape.data[0] = 0;
	msg.ptr_shape.data[1] = 1;
	msg.ptr_shape.data[2] = 1;
	msg.ptr_shape.data[3] = 1;
	synthvid_send(hdev, &msg);

	return 0;
}

/* Send updated screen area (dirty rectangle) location to host */
static int
synthvid_update(struct fb_info *info, int x1, int y1, int x2, int y2)
{
	struct hv_device *hdev = device_to_hv_device(info->device);
	struct synthvid_msg msg;

	memset(&msg, 0, sizeof(struct synthvid_msg));
	if (x2 == INT_MAX)
		x2 = info->var.xres;
	if (y2 == INT_MAX)
		y2 = info->var.yres;

	msg.vid_hdr.type = SYNTHVID_DIRT;
	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_dirt);
	msg.dirt.video_output = 0;
	msg.dirt.dirt_count = 1;
	msg.dirt.rect[0].x1 = (x1 > x2) ? 0 : x1;
	msg.dirt.rect[0].y1 = (y1 > y2) ? 0 : y1;
	msg.dirt.rect[0].x2 =
		(x2 < x1 || x2 > info->var.xres) ? info->var.xres : x2;
	msg.dirt.rect[0].y2 =
		(y2 < y1 || y2 > info->var.yres) ? info->var.yres : y2;

	synthvid_send(hdev, &msg);

	return 0;
}

static void hvfb_docopy(struct hvfb_par *par,
			unsigned long offset,
			unsigned long size)
{
	if (!par || !par->mmio_vp || !par->dio_vp || !par->fb_ready ||
	    size == 0 || offset >= dio_fb_size)
		return;

	if (offset + size > dio_fb_size)
		size = dio_fb_size - offset;

	memcpy(par->mmio_vp + offset, par->dio_vp + offset, size);
}

/* Deferred IO callback */
static void synthvid_deferred_io(struct fb_info *p,
				 struct list_head *pagelist)
{
	struct hvfb_par *par = p->par;
	struct page *page;
	unsigned long start, end;
	int y1, y2, miny, maxy;

	miny = INT_MAX;
	maxy = 0;

	/*
	 * Merge dirty pages. It is possible that the last page crosses
	 * over the end of the frame buffer's last row (yres). This is
	 * taken care of in synthvid_update() by clamping the y2 value
	 * to yres.
	 */
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		y1 = start / p->fix.line_length;
		y2 = end / p->fix.line_length;
		miny = min_t(int, miny, y1);
		maxy = max_t(int, maxy, y2);

		/* Copy from dio space to mmio address */
		if (par->fb_ready && par->need_docopy)
			hvfb_docopy(par, start, PAGE_SIZE);
	}

	if (par->fb_ready && par->update)
		synthvid_update(p, 0, miny, p->var.xres, maxy + 1);
}

static struct fb_deferred_io synthvid_defio = {
	.delay		= HZ / 20,
	.deferred_io	= synthvid_deferred_io,
};
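
/*
 * Note on the deferred IO path: writes from user space land in the
 * vmalloc'ed dio_vp buffer that backs the fbdev mapping. Every .delay
 * interval the fbdev core passes the touched pages to
 * synthvid_deferred_io(), which copies them into the real framebuffer
 * (mmio_vp) when need_docopy is set and then reports the dirty rows to
 * the host via a SYNTHVID_DIRT message.
 */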

/*
 * Actions on received messages from host:
 * Complete the wait event.
 * Or, reply with screen and cursor info.
 */
static void synthvid_recv_sub(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par;
	struct synthvid_msg *msg;

	if (!info)
		return;

	par = info->par;
	msg = (struct synthvid_msg *)par->recv_buf;

	/* Complete the wait event */
	if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
	    msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
	    msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
		memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE);
		complete(&par->wait);
		return;
	}

	/* Reply with screen and cursor info */
	if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
		if (par->fb_ready) {
			synthvid_send_ptr(hdev);
			synthvid_send_situ(hdev);
		}

		par->update = msg->feature_chg.is_dirt_needed;
		if (par->update)
			schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
	}
}

/* Receive callback for messages from the host */
static void synthvid_receive(void *ctx)
{
	struct hv_device *hdev = ctx;
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par;
	struct synthvid_msg *recv_buf;
	u32 bytes_recvd;
	u64 req_id;
	int ret;

	if (!info)
		return;

	par = info->par;
	recv_buf = (struct synthvid_msg *)par->recv_buf;

	do {
		ret = vmbus_recvpacket(hdev->channel, recv_buf,
				       MAX_VMBUS_PKT_SIZE,
				       &bytes_recvd, &req_id);
		if (bytes_recvd > 0 &&
		    recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
			synthvid_recv_sub(hdev);
	} while (bytes_recvd > 0 && ret == 0);
}

/* Check if ver1 is equal to or greater than ver2 */
static inline bool synthvid_ver_ge(u32 ver1, u32 ver2)
{
	if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
	    (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
	     SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
		return true;

	return false;
}

/* Check synthetic video protocol version with the host */
static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
	int ret = 0;
	unsigned long t;

	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_version_req);
	msg->ver_req.version = ver;
	synthvid_send(hdev, msg);

	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
	if (!t) {
		pr_err("Timed out waiting for version response\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	if (!msg->ver_resp.is_accepted) {
		ret = -ENODEV;
		goto out;
	}

	par->synthvid_version = ver;
	pr_info("Synthvid Version major %d, minor %d\n",
		SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));

out:
	return ret;
}

/* Get the supported resolutions and the host's default resolution */
static int synthvid_get_supported_resolution(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
	int ret = 0;
	unsigned long t;
	u8 index;
	int i;

	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_supported_resolution_req);
	msg->resolution_req.maximum_resolution_count =
		SYNTHVID_MAX_RESOLUTION_COUNT;
	synthvid_send(hdev, msg);

	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
	if (!t) {
		pr_err("Timed out waiting for resolution response\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	if (msg->resolution_resp.resolution_count == 0) {
		pr_err("No supported resolutions\n");
		ret = -ENODEV;
		goto out;
	}

	index = msg->resolution_resp.default_resolution_index;
	if (index >= msg->resolution_resp.resolution_count) {
		pr_err("Invalid resolution index: %d\n", index);
		ret = -ENODEV;
		goto out;
	}

	for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
		screen_width_max = max_t(unsigned int, screen_width_max,
			msg->resolution_resp.supported_resolution[i].width);
		screen_height_max = max_t(unsigned int, screen_height_max,
			msg->resolution_resp.supported_resolution[i].height);
	}

	screen_width =
		msg->resolution_resp.supported_resolution[index].width;
	screen_height =
		msg->resolution_resp.supported_resolution[index].height;

out:
	return ret;
}

/* Connect to VSP (Virtual Service Provider) on host */
static int synthvid_connect_vsp(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	int ret;

	ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE,
			 NULL, 0, synthvid_receive, hdev);
	if (ret) {
		pr_err("Unable to open vmbus channel\n");
		return ret;
	}

	/* Negotiate the protocol version with host */
	switch (vmbus_proto_version) {
	case VERSION_WIN10:
	case VERSION_WIN10_V5:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
		if (!ret)
			break;
		fallthrough;
	case VERSION_WIN8:
	case VERSION_WIN8_1:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
		if (!ret)
			break;
		fallthrough;
	case VERSION_WS2008:
	case VERSION_WIN7:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
		break;
	default:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
		break;
	}

	if (ret) {
		pr_err("Synthetic video device version not accepted\n");
		goto error;
	}

	if (par->synthvid_version == SYNTHVID_VERSION_WIN7)
		screen_depth = SYNTHVID_DEPTH_WIN7;
	else
		screen_depth = SYNTHVID_DEPTH_WIN8;

	if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
		ret = synthvid_get_supported_resolution(hdev);
		if (ret)
			pr_info("Failed to get supported resolution from host, use default\n");
	}

	screen_fb_size = hdev->channel->offermsg.offer.
		mmio_megabytes * 1024 * 1024;

	return 0;

error:
	vmbus_close(hdev->channel);
	return ret;
}

/* Send VRAM and Situation messages to the host */
static int synthvid_send_config(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
	int ret = 0;
	unsigned long t;

	/* Send VRAM location */
	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_vram_location);
	msg->vram.user_ctx = msg->vram.vram_gpa = par->mmio_pp;
	msg->vram.is_vram_gpa_specified = 1;
	synthvid_send(hdev, msg);

	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
	if (!t) {
		pr_err("Timed out waiting for VRAM location ack\n");
		ret = -ETIMEDOUT;
		goto out;
	}
	if (msg->vram_ack.user_ctx != par->mmio_pp) {
		pr_err("Unable to set VRAM location\n");
		ret = -ENODEV;
		goto out;
	}

	/* Send pointer and situation update */
	synthvid_send_ptr(hdev);
	synthvid_send_situ(hdev);

out:
	return ret;
}

/*
 * Delayed work callback:
 * It is scheduled whenever an update request is received and it has not
 * run within the last HVFB_ONDEMAND_THROTTLE interval.
 */
static void hvfb_update_work(struct work_struct *w)
{
	struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work);
	struct fb_info *info = par->info;
	unsigned long flags;
	int x1, x2, y1, y2;
	int j;

	spin_lock_irqsave(&par->delayed_refresh_lock, flags);
	/* Reset the request flag */
	par->delayed_refresh = false;

	/* Store the dirty rectangle to local variables */
	x1 = par->x1;
	x2 = par->x2;
	y1 = par->y1;
	y2 = par->y2;

	/* Clear dirty rectangle */
	par->x1 = par->y1 = INT_MAX;
	par->x2 = par->y2 = 0;

	spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);

	if (x1 > info->var.xres || x2 > info->var.xres ||
	    y1 > info->var.yres || y2 > info->var.yres || x2 <= x1)
		return;

	/* Copy the dirty rectangle to frame buffer memory */
	if (par->need_docopy)
		for (j = y1; j < y2; j++)
			hvfb_docopy(par,
				    j * info->fix.line_length +
				    (x1 * screen_depth / 8),
				    (x2 - x1) * screen_depth / 8);

	/* Refresh */
	if (par->fb_ready && par->update)
		synthvid_update(info, x1, y1, x2, y2);
}

/*
 * Control the on-demand refresh frequency. It schedules a delayed
 * screen update if one is not already pending.
 */
static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
					   int x1, int y1, int w, int h)
{
	unsigned long flags;
	int x2 = x1 + w;
	int y2 = y1 + h;

	spin_lock_irqsave(&par->delayed_refresh_lock, flags);

	/* Merge dirty rectangle */
	par->x1 = min_t(int, par->x1, x1);
	par->y1 = min_t(int, par->y1, y1);
	par->x2 = max_t(int, par->x2, x2);
	par->y2 = max_t(int, par->y2, y2);

	/* Schedule a delayed screen update if not yet */
	if (par->delayed_refresh == false) {
		schedule_delayed_work(&par->dwork,
				      HVFB_ONDEMAND_THROTTLE);
		par->delayed_refresh = true;
	}

	spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
}

static int hvfb_on_panic(struct notifier_block *nb,
			 unsigned long e, void *p)
{
	struct hvfb_par *par;
	struct fb_info *info;

	par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
	par->synchronous_fb = true;
	info = par->info;
	if (par->need_docopy)
		hvfb_docopy(par, 0, dio_fb_size);
	synthvid_update(info, 0, 0, INT_MAX, INT_MAX);

	return NOTIFY_DONE;
}

/* Framebuffer operation handlers */

static int hvfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	if (var->xres < HVFB_WIDTH_MIN || var->yres < HVFB_HEIGHT_MIN ||
	    var->xres > screen_width || var->yres > screen_height ||
	    var->bits_per_pixel != screen_depth)
		return -EINVAL;

	var->xres_virtual = var->xres;
	var->yres_virtual = var->yres;

	return 0;
}

static int hvfb_set_par(struct fb_info *info)
{
	struct hv_device *hdev = device_to_hv_device(info->device);

	return synthvid_send_situ(hdev);
}


static inline u32 chan_to_field(u32 chan, struct fb_bitfield *bf)
{
	return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
}

static int hvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			  unsigned blue, unsigned transp, struct fb_info *info)
{
	u32 *pal = info->pseudo_palette;

	if (regno > 15)
		return -EINVAL;

	pal[regno] = chan_to_field(red, &info->var.red)
		| chan_to_field(green, &info->var.green)
		| chan_to_field(blue, &info->var.blue)
		| chan_to_field(transp, &info->var.transp);

	return 0;
}

static int hvfb_blank(int blank, struct fb_info *info)
{
	return 1;	/* get fb_blank to set the colormap to all black */
}

static void hvfb_cfb_fillrect(struct fb_info *p,
			      const struct fb_fillrect *rect)
{
	struct hvfb_par *par = p->par;

	cfb_fillrect(p, rect);
	if (par->synchronous_fb)
		synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
	else
		hvfb_ondemand_refresh_throttle(par, rect->dx, rect->dy,
					       rect->width, rect->height);
}

static void hvfb_cfb_copyarea(struct fb_info *p,
			      const struct fb_copyarea *area)
{
	struct hvfb_par *par = p->par;

	cfb_copyarea(p, area);
	if (par->synchronous_fb)
		synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
	else
		hvfb_ondemand_refresh_throttle(par, area->dx, area->dy,
					       area->width, area->height);
}

static void hvfb_cfb_imageblit(struct fb_info *p,
			       const struct fb_image *image)
{
	struct hvfb_par *par = p->par;

	cfb_imageblit(p, image);
	if (par->synchronous_fb)
		synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
	else
		hvfb_ondemand_refresh_throttle(par, image->dx, image->dy,
					       image->width, image->height);
}

static const struct fb_ops hvfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = hvfb_check_var,
	.fb_set_par = hvfb_set_par,
	.fb_setcolreg = hvfb_setcolreg,
	.fb_fillrect = hvfb_cfb_fillrect,
	.fb_copyarea = hvfb_cfb_copyarea,
	.fb_imageblit = hvfb_cfb_imageblit,
	.fb_blank = hvfb_blank,
};


/* Get options from kernel parameter "video=" */
static void hvfb_get_option(struct fb_info *info)
{
	struct hvfb_par *par = info->par;
	char *opt = NULL, *p;
	uint x = 0, y = 0;

	if (fb_get_options(KBUILD_MODNAME, &opt) || !opt || !*opt)
		return;

	p = strsep(&opt, "x");
	if (!*p || kstrtouint(p, 0, &x) ||
	    !opt || !*opt || kstrtouint(opt, 0, &y)) {
		pr_err("Screen option is invalid: skipped\n");
		return;
	}

	if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
	    (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
	    (x > screen_width_max || y > screen_height_max)) ||
	    (par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
	     x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
	    (par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
	     (x > SYNTHVID_WIDTH_MAX_WIN7 || y > SYNTHVID_HEIGHT_MAX_WIN7))) {
		pr_err("Screen resolution option is out of range: skipped\n");
		return;
	}

	screen_width = x;
	screen_height = y;
	return;
}

/*
 * Allocate enough contiguous physical memory.
 * Return the physical address if succeeded, or -1 if failed.
 */
static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
				   unsigned int request_size)
{
	struct page *page = NULL;
	dma_addr_t dma_handle;
	void *vmem;
	phys_addr_t paddr = 0;
	unsigned int order = get_order(request_size);

	if (request_size == 0)
		return -1;

	if (order < MAX_ORDER) {
		/* Call alloc_pages if the size is less than 2^MAX_ORDER */
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (!page)
			return -1;

		paddr = (page_to_pfn(page) << PAGE_SHIFT);
	} else {
		/* Allocate from CMA */
		hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);

		vmem = dma_alloc_coherent(&hdev->device,
					  round_up(request_size, PAGE_SIZE),
					  &dma_handle,
					  GFP_KERNEL | __GFP_NOWARN);

		if (!vmem)
			return -1;

		paddr = virt_to_phys(vmem);
	}

	return paddr;
}

/* Release contiguous physical memory */
static void hvfb_release_phymem(struct hv_device *hdev,
				phys_addr_t paddr, unsigned int size)
{
	unsigned int order = get_order(size);

	if (order < MAX_ORDER)
		__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
	else
		dma_free_coherent(&hdev->device,
				  round_up(size, PAGE_SIZE),
				  phys_to_virt(paddr),
				  paddr);
}


/* Get framebuffer memory from Hyper-V video pci space */
static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
{
	struct hvfb_par *par = info->par;
	struct pci_dev *pdev  = NULL;
	void __iomem *fb_virt;
	int gen2vm = efi_enabled(EFI_BOOT);
	resource_size_t pot_start, pot_end;
	phys_addr_t paddr;
	int ret;

	info->apertures = alloc_apertures(1);
	if (!info->apertures)
		return -ENOMEM;

	if (!gen2vm) {
		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
				      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
		if (!pdev) {
			pr_err("Unable to find PCI Hyper-V video\n");
			return -ENODEV;
		}

		info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);

		/*
		 * For Gen 1 VM, we can directly use the contiguous memory
		 * from VM. If we succeed, deferred IO happens directly
		 * on this allocated framebuffer memory, avoiding extra
		 * memory copy.
		 */
		paddr = hvfb_get_phymem(hdev, screen_fb_size);
		if (paddr != (phys_addr_t) -1) {
			par->mmio_pp = paddr;
			par->mmio_vp = par->dio_vp = __va(paddr);

			info->fix.smem_start = paddr;
			info->fix.smem_len = screen_fb_size;
			info->screen_base = par->mmio_vp;
			info->screen_size = screen_fb_size;

			par->need_docopy = false;
			goto getmem_done;
		}
		pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
	} else {
		info->apertures->ranges[0].base = screen_info.lfb_base;
		info->apertures->ranges[0].size = screen_info.lfb_size;
	}

	/*
	 * Cannot use the contiguous physical memory.
	 * Allocate mmio space for framebuffer.
	 */
	dio_fb_size =
		screen_width * screen_height * screen_depth / 8;

	if (gen2vm) {
		pot_start = 0;
		pot_end = -1;
	} else {
		if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
		    pci_resource_len(pdev, 0) < screen_fb_size) {
			pr_err("Resource not available or (0x%lx < 0x%lx)\n",
			       (unsigned long) pci_resource_len(pdev, 0),
			       (unsigned long) screen_fb_size);
			goto err1;
		}

		pot_end = pci_resource_end(pdev, 0);
		pot_start = pot_end - screen_fb_size + 1;
	}

	ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
				  screen_fb_size, 0x100000, true);
	if (ret != 0) {
		pr_err("Unable to allocate framebuffer memory\n");
		goto err1;
	}

	/*
	 * Map the VRAM cacheable for performance. This is also required for
	 * VM Connect to display properly for ARM64 Linux VM, as the host also
	 * maps the VRAM cacheable.
	 */
	fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
	if (!fb_virt)
		goto err2;

	/* Allocate memory for deferred IO */
	par->dio_vp = vzalloc(round_up(dio_fb_size, PAGE_SIZE));
	if (par->dio_vp == NULL)
		goto err3;

	/* Physical address of FB device */
	par->mmio_pp = par->mem->start;
	/* Virtual address of FB device */
	par->mmio_vp = (unsigned char *) fb_virt;

	info->fix.smem_start = par->mem->start;
	info->fix.smem_len = dio_fb_size;
	info->screen_base = par->dio_vp;
	info->screen_size = dio_fb_size;

getmem_done:
	remove_conflicting_framebuffers(info->apertures,
					KBUILD_MODNAME, false);

	if (gen2vm) {
		/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
		screen_info.lfb_size = 0;
		screen_info.lfb_base = 0;
		screen_info.orig_video_isVGA = 0;
	} else {
		pci_dev_put(pdev);
	}

	return 0;

err3:
	iounmap(fb_virt);
err2:
	vmbus_free_mmio(par->mem->start, screen_fb_size);
	par->mem = NULL;
err1:
	if (!gen2vm)
		pci_dev_put(pdev);

	return -ENOMEM;
}

/* Release the framebuffer */
static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
{
	struct hvfb_par *par = info->par;

	if (par->need_docopy) {
		vfree(par->dio_vp);
		iounmap(info->screen_base);
		vmbus_free_mmio(par->mem->start, screen_fb_size);
	} else {
		hvfb_release_phymem(hdev, info->fix.smem_start,
				    screen_fb_size);
	}

	par->mem = NULL;
}


static int hvfb_probe(struct hv_device *hdev,
		      const struct hv_vmbus_device_id *dev_id)
{
	struct fb_info *info;
	struct hvfb_par *par;
	int ret;

	info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device);
	if (!info)
		return -ENOMEM;

	par = info->par;
	par->info = info;
	par->fb_ready = false;
	par->need_docopy = true;
	init_completion(&par->wait);
	INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);

	par->delayed_refresh = false;
	spin_lock_init(&par->delayed_refresh_lock);
	par->x1 = par->y1 = INT_MAX;
	par->x2 = par->y2 = 0;

	/* Connect to VSP */
	hv_set_drvdata(hdev, info);
	ret = synthvid_connect_vsp(hdev);
	if (ret) {
		pr_err("Unable to connect to VSP\n");
		goto error1;
	}

	hvfb_get_option(info);
	pr_info("Screen resolution: %dx%d, Color depth: %d\n",
		screen_width, screen_height, screen_depth);

	ret = hvfb_getmem(hdev, info);
	if (ret) {
		pr_err("No memory for framebuffer\n");
		goto error2;
	}

	/* Set up fb_info */
	info->flags = FBINFO_DEFAULT;

	info->var.xres_virtual = info->var.xres = screen_width;
	info->var.yres_virtual = info->var.yres = screen_height;
	info->var.bits_per_pixel = screen_depth;

	if (info->var.bits_per_pixel == 16) {
		info->var.red = (struct fb_bitfield){11, 5, 0};
		info->var.green = (struct fb_bitfield){5, 6, 0};
		info->var.blue = (struct fb_bitfield){0, 5, 0};
		info->var.transp = (struct fb_bitfield){0, 0, 0};
	} else {
		info->var.red = (struct fb_bitfield){16, 8, 0};
		info->var.green = (struct fb_bitfield){8, 8, 0};
		info->var.blue = (struct fb_bitfield){0, 8, 0};
		info->var.transp = (struct fb_bitfield){24, 8, 0};
	}

	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;
	info->var.vmode = FB_VMODE_NONINTERLACED;

	strcpy(info->fix.id, KBUILD_MODNAME);
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.line_length = screen_width * screen_depth / 8;
	info->fix.accel = FB_ACCEL_NONE;

	info->fbops = &hvfb_ops;
	info->pseudo_palette = par->pseudo_palette;

	/* Initialize deferred IO */
	info->fbdefio = &synthvid_defio;
	fb_deferred_io_init(info);

	/* Send config to host */
	ret = synthvid_send_config(hdev);
	if (ret)
		goto error;

	ret = register_framebuffer(info);
	if (ret) {
		pr_err("Unable to register framebuffer\n");
		goto error;
	}

	par->fb_ready = true;

	par->synchronous_fb = false;
	par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
	atomic_notifier_chain_register(&panic_notifier_list,
				       &par->hvfb_panic_nb);

	return 0;

error:
	fb_deferred_io_cleanup(info);
	hvfb_putmem(hdev, info);
error2:
	vmbus_close(hdev->channel);
error1:
	cancel_delayed_work_sync(&par->dwork);
	hv_set_drvdata(hdev, NULL);
	framebuffer_release(info);
	return ret;
}


static int hvfb_remove(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;

	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &par->hvfb_panic_nb);

	par->update = false;
	par->fb_ready = false;

	fb_deferred_io_cleanup(info);

	unregister_framebuffer(info);
	cancel_delayed_work_sync(&par->dwork);

	vmbus_close(hdev->channel);
	hv_set_drvdata(hdev, NULL);

	hvfb_putmem(hdev, info);
	framebuffer_release(info);

	return 0;
}

static int hvfb_suspend(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;

	console_lock();

	/* 1 means do suspend */
	fb_set_suspend(info, 1);

	cancel_delayed_work_sync(&par->dwork);
	cancel_delayed_work_sync(&info->deferred_work);

	par->update_saved = par->update;
	par->update = false;
	par->fb_ready = false;

	vmbus_close(hdev->channel);

	console_unlock();

	return 0;
}

static int hvfb_resume(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	int ret;

	console_lock();

	ret = synthvid_connect_vsp(hdev);
	if (ret != 0)
		goto out;

	ret = synthvid_send_config(hdev);
	if (ret != 0) {
		vmbus_close(hdev->channel);
		goto out;
	}

	par->fb_ready = true;
	par->update = par->update_saved;

	schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
	schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);

	/* 0 means do resume */
	fb_set_suspend(info, 0);

out:
	console_unlock();

	return ret;
}


static const struct pci_device_id pci_stub_id_table[] = {
	{
		.vendor      = PCI_VENDOR_ID_MICROSOFT,
		.device      = PCI_DEVICE_ID_HYPERV_VIDEO,
	},
	{ /* end of list */ }
};

static const struct hv_vmbus_device_id id_table[] = {
	/* Synthetic Video Device GUID */
	{HV_SYNTHVID_GUID},
	{}
};

MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver hvfb_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = hvfb_probe,
	.remove = hvfb_remove,
	.suspend = hvfb_suspend,
	.resume = hvfb_resume,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int hvfb_pci_stub_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	return 0;
}

static void hvfb_pci_stub_remove(struct pci_dev *pdev)
{
}

static struct pci_driver hvfb_pci_stub_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pci_stub_id_table,
	.probe = hvfb_pci_stub_probe,
	.remove = hvfb_pci_stub_remove,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	}
};

static int __init hvfb_drv_init(void)
{
	int ret;

	ret = vmbus_driver_register(&hvfb_drv);
	if (ret != 0)
		return ret;

	ret = pci_register_driver(&hvfb_pci_stub_driver);
	if (ret != 0) {
		vmbus_driver_unregister(&hvfb_drv);
		return ret;
	}

	return 0;
}

static void __exit hvfb_drv_exit(void)
{
	pci_unregister_driver(&hvfb_pci_stub_driver);
	vmbus_driver_unregister(&hvfb_drv);
}

module_init(hvfb_drv_init);
module_exit(hvfb_drv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");