/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H

#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_gpu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
#define DRIVER_DATE "0"

#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0

struct virtio_gpu_object_params {
	uint32_t format;
	uint32_t width;
	uint32_t height;
	unsigned long size;
	bool dumb;
	/* 3d */
	bool virgl;
	uint32_t target;
	uint32_t bind;
	uint32_t depth;
	uint32_t array_size;
	uint32_t last_level;
	uint32_t nr_samples;
	uint32_t flags;
};

struct virtio_gpu_object {
	struct drm_gem_object gem_base;
	uint32_t hw_res_handle;

	struct sg_table *pages;
	uint32_t mapped;
	void *vmap;
	bool dumb;
	struct ttm_place placement_code;
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	bool created;
};

#define gem_to_virtio_gpu_obj(gobj) \
	container_of((gobj), struct virtio_gpu_object, gem_base)

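/*
 * Usage sketch for the macro above (illustrative, not code from this
 * driver): DRM callbacks hand us the embedded struct drm_gem_object;
 * the macro recovers the containing virtio_gpu_object via container_of().
 *
 *	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(gem_obj);
 */
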
struct virtio_gpu_vbuffer;
struct virtio_gpu_device;

typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf);

struct virtio_gpu_fence_driver {
	atomic64_t last_seq;
	uint64_t sync_seq;
	uint64_t context;
	struct list_head fences;
	spinlock_t lock;
};

struct virtio_gpu_fence {
	struct dma_fence f;
	struct virtio_gpu_fence_driver *drv;
	struct list_head node;
};
#define to_virtio_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)

struct virtio_gpu_vbuffer {
	char *buf;
	int size;

	void *data_buf;
	uint32_t data_size;

	char *resp_buf;
	int resp_size;

	virtio_gpu_resp_cb resp_cb;

	struct list_head list;
};

struct virtio_gpu_output {
	int index;
	struct drm_crtc crtc;
	struct drm_connector conn;
	struct drm_encoder enc;
	struct virtio_gpu_display_one info;
	struct virtio_gpu_update_cursor cursor;
	struct edid *edid;
	int cur_x;
	int cur_y;
	bool enabled;
};
#define drm_crtc_to_virtio_gpu_output(x) \
	container_of(x, struct virtio_gpu_output, crtc)
#define drm_connector_to_virtio_gpu_output(x) \
	container_of(x, struct virtio_gpu_output, conn)
#define drm_encoder_to_virtio_gpu_output(x) \
	container_of(x, struct virtio_gpu_output, enc)

struct virtio_gpu_framebuffer {
	struct drm_framebuffer base;
	struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
	container_of(x, struct virtio_gpu_framebuffer, base)

struct virtio_gpu_mman {
	struct ttm_bo_device bdev;
};

struct virtio_gpu_queue {
	struct virtqueue *vq;
	spinlock_t qlock;
	wait_queue_head_t ack_queue;
	struct work_struct dequeue_work;
};

struct virtio_gpu_drv_capset {
	uint32_t id;
	uint32_t max_version;
	uint32_t max_size;
};

struct virtio_gpu_drv_cap_cache {
	struct list_head head;
	void *caps_cache;
	uint32_t id;
	uint32_t version;
	uint32_t size;
	atomic_t is_valid;
};

struct virtio_gpu_device {
	struct device *dev;
	struct drm_device *ddev;

	struct virtio_device *vdev;

	struct virtio_gpu_mman mman;

	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
	uint32_t num_scanouts;

	struct virtio_gpu_queue ctrlq;
	struct virtio_gpu_queue cursorq;
	struct kmem_cache *vbufs;
	bool vqs_ready;

	struct ida resource_ida;

	wait_queue_head_t resp_wq;
	/* current display info */
	spinlock_t display_info_lock;
	bool display_info_pending;

	struct virtio_gpu_fence_driver fence_drv;

	struct ida ctx_id_ida;

	bool has_virgl_3d;
	bool has_edid;

	struct work_struct config_changed_work;

	struct virtio_gpu_drv_capset *capsets;
	uint32_t num_capsets;
	struct list_head cap_cache;
};

struct virtio_gpu_fpriv {
	uint32_t ctx_id;
};

/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];

int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
				    struct list_head *head);
void virtio_gpu_unref_list(struct list_head *head);

/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);

/* virtio_gem.c */
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p);
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file);
struct virtio_gpu_object*
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args);
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p);

/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height);
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence);
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id);
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);

/* virtio_gpu_display.c */
int virtio_gpu_framebuffer_init(struct drm_device *dev,
				struct virtio_gpu_framebuffer *vgfb,
				const struct drm_mode_fb_cmd2 *mode_cmd,
				struct drm_gem_object *obj);
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);

/* virtio_gpu_plane.c */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index);

/* virtio_gpu_ttm.c */
int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);

/* virtio_gpu_fence.c */
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
	struct virtio_gpu_device *vgdev);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
				    u64 last_seq);

/* virtio_gpu_object */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence);
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo);
void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);

/* virtgpu_prime.c */
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma);

2013-09-09 10:02:56 +10:00
static inline struct virtio_gpu_object *
virtio_gpu_object_ref ( struct virtio_gpu_object * bo )
{
2018-07-31 08:21:26 +02:00
ttm_bo_get ( & bo - > tbo ) ;
2013-09-09 10:02:56 +10:00
return bo ;
}
static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

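/*
 * Illustrative pairing of the two helpers above (a sketch, not code from
 * this driver): virtio_gpu_object_ref() takes a TTM reference and returns
 * the object; virtio_gpu_object_unref() drops it and clears the caller's
 * pointer so it cannot be used after the reference is gone.
 *
 *	struct virtio_gpu_object *ref = virtio_gpu_object_ref(bo);
 *	... use ref ...
 *	virtio_gpu_object_unref(&ref);	// ref is NULL afterwards
 */
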
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
					    bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			struct virtio_gpu_device *qdev =
				bo->gem_base.dev->dev_private;
			dev_err(qdev->dev, "%p reserve failed\n", bo);
		}
		return r;
	}
	return 0;
}

static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

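/*
 * Typical reserve/unreserve pattern (a sketch assuming the caller can
 * propagate -ERESTARTSYS; not code from this driver): reserve the BO,
 * perform the access, then unreserve.
 *
 *	int ret = virtio_gpu_object_reserve(bo, false);
 *	if (ret)
 *		return ret;
 *	ret = virtio_gpu_object_kmap(bo);
 *	virtio_gpu_object_unreserve(bo);
 */
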
/* virgl debugfs */
int virtio_gpu_debugfs_init(struct drm_minor *minor);

#endif