/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"
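
/*
 * Contexts are created lazily: the first ioctl that needs one calls this
 * helper, which is why it is safe to call repeatedly (creation is guarded
 * by vfpriv->context_lock and the context_created flag).
 */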
static void virtio_gpu_create_context(struct drm_device *dev,
				      struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	virtio_gpu_notify(vgdev);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}
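
/*
 * VIRTGPU_MAP: resolve a GEM handle to the mmap offset that userspace
 * passes to mmap() on the DRM fd to map the buffer into its address space.
 */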
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
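
/*
 * A minimal userspace sketch of the call (variable names hypothetical,
 * error handling omitted):
 *
 *	struct drm_virtgpu_execbuffer eb = {
 *		.flags          = 0,
 *		.size           = cmd_size,
 *		.command        = (uintptr_t)cmd_buf,
 *		.bo_handles     = (uintptr_t)handles,
 *		.num_bo_handles = nr_handles,
 *		.fence_fd       = -1,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
 */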
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
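
/*
 * VIRTGPU_GETPARAM: report driver capabilities to userspace.  A minimal
 * sketch of the query (hypothetical variable names):
 *
 *	int have_3d = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (uintptr_t)&have_3d,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
 */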
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
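
/*
 * VIRTGPU_RESOURCE_CREATE: create a host-side resource plus the guest-side
 * GEM object backing it.  With virgl the 3D parameters are passed through
 * to the host; without it only simple 2D resources are accepted.
 */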
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}
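
/* VIRTGPU_RESOURCE_INFO: return the size and host resource handle of a BO. */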
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
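
/*
 * VIRTGPU_TRANSFER_TO_HOST: flush (part of) a guest BO to the host
 * resource.  2D transfers go out unfenced; 3D transfers take a fence so
 * completion can be tracked via the BO's reservation object.
 */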
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &args->box, objs, fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
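
/*
 * VIRTGPU_WAIT: wait for all fences attached to a BO's reservation object,
 * or just poll them if VIRTGPU_WAIT_NOWAIT is set.
 */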
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);
	return ret;
}
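
/*
 * VIRTGPU_GET_CAPS: copy a capability set to userspace, fetching it from
 * the host on first use and serving later queries from vgdev->cap_cache.
 */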
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
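
/*
 * The ioctl numbers and argument structs are defined in the UAPI header
 * <drm/virtgpu_drm.h>; all entries are available to render nodes.
 */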
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};