/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
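
/*
 * Allocate a host resource id.  With the virglrenderer workaround enabled
 * (the default) ids come from a monotonically increasing counter and are
 * never reused; otherwise they come from an IDA so released ids can be
 * recycled.  In both cases the result is biased by one so that 0 is never
 * handed out.
 */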
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);

		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}

	return 0;
}
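
/*
 * Release a resource id obtained from virtio_gpu_resource_id_get().
 * A no-op while the virglrenderer workaround is active.
 */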
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}
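
/*
 * Free the guest-side state of a buffer object: release its resource id and
 * tear down either the shmem GEM object or the host-visible VRAM mapping
 * backing it.
 */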
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		drm_gem_shmem_free(&bo->base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}
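
/*
 * GEM ->free callback.  If the resource was created on the host, send an
 * unref command and defer the cleanup to its completion handler; otherwise
 * clean up immediately.
 */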
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}
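
/*
 * GEM object callbacks for shmem-backed objects; most operations are
 * delegated to the generic drm_gem_shmem helpers.
 */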
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}
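
/*
 * Allocate the driver-private structure for a new GEM object and install
 * the virtio-gpu shmem callbacks; backing storage is set up later by the
 * drm_gem_shmem helpers.
 */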
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return ERR_PTR(-ENOMEM);

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}
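
/*
 * Build the virtio_gpu_mem_entry array describing the object's backing
 * pages.  When the transport uses the DMA API the entries carry DMA
 * addresses, otherwise plain physical addresses.
 */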
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct scatterlist *sg;
	struct sg_table *pages;
	int si;

	pages = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (use_dma_api)
		*nents = pages->nents;
	else
		*nents = pages->orig_nents;

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}
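
/*
 * Create a virtio-gpu buffer object: allocate the shmem GEM object, reserve
 * a resource id, build the backing-page entry list and issue the matching
 * resource-create command (blob, 3D or 2D) to the host.
 */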
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0)
		goto err_put_id;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free(shmem_obj);
	return ret;
}