/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H

#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_gpu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/virtgpu_drm.h>

/* DRM driver identification, reported to userspace via GETVERSION. */
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
#define DRIVER_DATE "0"

#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0

/*
 * Creation parameters for a virtio-gpu resource.  The first members
 * apply to every object; the members below the "3d" marker are only
 * meaningful when @virgl is set (3d/virgl resource creation).
 */
struct virtio_gpu_object_params {
	uint32_t format;
	uint32_t width;
	uint32_t height;
	unsigned long size;
	bool dumb;
	/* 3d */
	bool virgl;
	uint32_t target;
	uint32_t bind;
	uint32_t depth;
	uint32_t array_size;
	uint32_t last_level;
	uint32_t nr_samples;
	uint32_t flags;
};

struct virtio_gpu_object {
2019-08-29 12:32:57 +02:00
struct drm_gem_shmem_object base ;
2013-09-09 10:02:56 +10:00
uint32_t hw_res_handle ;
struct sg_table * pages ;
2018-08-29 14:20:26 +02:00
uint32_t mapped ;
2020-02-07 08:46:38 +01:00
2013-09-09 10:02:56 +10:00
bool dumb ;
2018-10-19 08:18:42 +02:00
bool created ;
2013-09-09 10:02:56 +10:00
} ;
/* Upcast from the embedded gem object to the virtio-gpu object. */
#define gem_to_virtio_gpu_obj(gobj) \
	container_of((gobj), struct virtio_gpu_object, base.base)

struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket ;
2019-08-30 08:01:16 +02:00
struct list_head next ;
2019-08-29 12:32:50 +02:00
u32 nents , total ;
struct drm_gem_object * objs [ ] ;
} ;
2013-09-09 10:02:56 +10:00
struct virtio_gpu_vbuffer;
struct virtio_gpu_device;

/* Callback invoked when the host's response for a vbuffer arrives. */
typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf);
struct virtio_gpu_fence_driver {
atomic64_t last_seq ;
uint64_t sync_seq ;
2016-08-31 12:26:53 -04:00
uint64_t context ;
2013-09-09 10:02:56 +10:00
struct list_head fences ;
spinlock_t lock ;
} ;
struct virtio_gpu_fence {
2016-10-25 13:00:45 +01:00
struct dma_fence f ;
2013-09-09 10:02:56 +10:00
struct virtio_gpu_fence_driver * drv ;
struct list_head node ;
} ;
struct virtio_gpu_vbuffer {
char * buf ;
int size ;
void * data_buf ;
uint32_t data_size ;
char * resp_buf ;
int resp_size ;
virtio_gpu_resp_cb resp_cb ;
2020-02-07 08:46:36 +01:00
void * resp_cb_data ;
2013-09-09 10:02:56 +10:00
2019-08-29 12:32:51 +02:00
struct virtio_gpu_object_array * objs ;
2013-09-09 10:02:56 +10:00
struct list_head list ;
} ;
/*
 * One scanout: its crtc/connector/encoder trio, the display info and
 * cursor state last reported to or received from the host.
 */
struct virtio_gpu_output {
	int index;
	struct drm_crtc crtc;
	struct drm_connector conn;
	struct drm_encoder enc;
	struct virtio_gpu_display_one info;
	struct virtio_gpu_update_cursor cursor;
	struct edid *edid;
	int cur_x;
	int cur_y;
	bool enabled;
};
/* Upcast from the embedded crtc to its virtio-gpu output. */
#define drm_crtc_to_virtio_gpu_output(x) \
	container_of(x, struct virtio_gpu_output, crtc)
/* DRM framebuffer plus the fence for the pending pageflip, if any. */
struct virtio_gpu_framebuffer {
	struct drm_framebuffer base;
	struct virtio_gpu_fence *fence;
};
/* Upcast from the embedded drm framebuffer to the virtio-gpu one. */
#define to_virtio_gpu_framebuffer(x) \
	container_of(x, struct virtio_gpu_framebuffer, base)
/* A virtqueue with its lock, free-space waitqueue and dequeue worker. */
struct virtio_gpu_queue {
	struct virtqueue *vq;
	spinlock_t qlock;
	wait_queue_head_t ack_queue;
	struct work_struct dequeue_work;
};

/* Capability set advertised by the host: id plus supported limits. */
struct virtio_gpu_drv_capset {
	uint32_t id;
	uint32_t max_version;
	uint32_t max_size;
};

/*
 * Cached capset contents fetched from the host.  @is_valid flips to
 * non-zero once the host response has filled @caps_cache.
 */
struct virtio_gpu_drv_cap_cache {
	struct list_head head;
	void *caps_cache;
	uint32_t id;
	uint32_t version;
	uint32_t size;
	atomic_t is_valid;
};

struct virtio_gpu_device {
struct device * dev ;
struct drm_device * ddev ;
struct virtio_device * vdev ;
struct virtio_gpu_output outputs [ VIRTIO_GPU_MAX_SCANOUTS ] ;
uint32_t num_scanouts ;
struct virtio_gpu_queue ctrlq ;
struct virtio_gpu_queue cursorq ;
2017-03-01 15:09:08 +01:00
struct kmem_cache * vbufs ;
2013-09-09 10:02:56 +10:00
2019-12-12 13:53:45 +01:00
bool disable_notify ;
bool pending_notify ;
2018-09-26 09:00:28 -07:00
struct ida resource_ida ;
2013-09-09 10:02:56 +10:00
wait_queue_head_t resp_wq ;
/* current display info */
spinlock_t display_info_lock ;
2015-06-16 14:25:34 +10:00
bool display_info_pending ;
2013-09-09 10:02:56 +10:00
struct virtio_gpu_fence_driver fence_drv ;
2018-09-26 09:00:28 -07:00
struct ida ctx_id_ida ;
2013-09-09 10:02:56 +10:00
2014-10-28 12:48:00 +01:00
bool has_virgl_3d ;
2018-10-30 07:32:06 +01:00
bool has_edid ;
2020-02-07 07:46:53 +01:00
bool has_indirect ;
2014-10-28 12:48:00 +01:00
2013-09-09 10:02:56 +10:00
struct work_struct config_changed_work ;
2014-10-28 12:48:00 +01:00
2019-08-30 08:01:16 +02:00
struct work_struct obj_free_work ;
spinlock_t obj_free_lock ;
struct list_head obj_free_list ;
2014-10-28 12:48:00 +01:00
struct virtio_gpu_drv_capset * capsets ;
uint32_t num_capsets ;
struct list_head cap_cache ;
2013-09-09 10:02:56 +10:00
} ;
/* Per-open-file state: the 3d context id allocated for this client. */
struct virtio_gpu_fpriv {
	uint32_t ctx_id;
};
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);

/* virtio_gem.c */
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p);
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args);
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p);

struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array *
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj);
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence);
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);

/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height);
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_mem_entry *ents,
			     unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);

void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);

/* virtio_gpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
/* virtio_gpu_plane.c */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index);
/* virtio_gpu_fence.c */
2018-11-12 17:51:54 +01:00
struct virtio_gpu_fence * virtio_gpu_fence_alloc (
struct virtio_gpu_device * vgdev ) ;
2019-05-06 11:10:34 +02:00
void virtio_gpu_fence_emit ( struct virtio_gpu_device * vgdev ,
2013-09-09 10:02:56 +10:00
struct virtio_gpu_ctrl_hdr * cmd_hdr ,
2018-11-28 16:10:20 +01:00
struct virtio_gpu_fence * fence ) ;
2013-09-09 10:02:56 +10:00
void virtio_gpu_fence_event_process ( struct virtio_gpu_device * vdev ,
u64 last_seq ) ;
/* virtio_gpu_object */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence);

/* virtgpu_prime.c */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *sgt);

/* virgl debugfs */
int virtio_gpu_debugfs_init(struct drm_minor *minor);

#endif