/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
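
/*
 * Resource IDs identify a buffer object to the host; id 0 is reserved
 * and means "no resource", so the handles handed out below are always
 * non-zero.  The virtio_gpu.virglhack module parameter makes the
 * ID-reuse workaround below switchable at load time.
 */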
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);

		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev,
				       uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}
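
/*
 * Final teardown: undo the shmem pin/map done at create time, release
 * the resource ID and free the GEM object.  Called either directly from
 * virtio_gpu_free_object() or from the virtqueue completion handler
 * once the host has acknowledged the RESOURCE_UNREF command.
 */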
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->pages) {
		if (bo->mapped) {
			dma_unmap_sg(vgdev->vdev->dev.parent,
				     bo->pages->sgl, bo->mapped,
				     DMA_TO_DEVICE);
			bo->mapped = 0;
		}
		sg_free_table(bo->pages);
		bo->pages = NULL;
		drm_gem_shmem_unpin(&bo->base.base);
	}

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	drm_gem_shmem_free_object(&bo->base.base);
}
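
/*
 * GEM ->free callback.  If the resource was created on the host side,
 * the RESOURCE_UNREF command must complete before the backing pages can
 * be released, so cleanup is deferred to the command's completion
 * handler; freeing the pages earlier would leave the host pointing at
 * freed memory.
 */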
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}
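
/*
 * GEM object vtable: only free/open/close are virtio-gpu specific,
 * everything else is inherited from the shmem helpers.
 */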
static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = &drm_gem_shmem_mmap,
};

struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	bo->base.base.funcs = &virtio_gpu_gem_funcs;
	return &bo->base.base;
}
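
/*
 * Pin the shmem pages, build a scatterlist for them and translate it
 * into the virtio_gpu_mem_entry array the host expects.  When the
 * device uses the DMA API the entries carry DMA addresses, otherwise
 * raw physical addresses are handed to the host.
 */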
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!bo->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					bo->pages->sgl, bo->pages->nents,
					DMA_TO_DEVICE);
		*nents = bo->mapped;
	} else {
		*nents = bo->pages->nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(bo->pages->sgl, sg, *nents, si) {
		(*ents)[si].addr = cpu_to_le64(use_dma_api
					       ? sg_dma_address(sg)
					       : sg_phys(sg));
		(*ents)[si].length = cpu_to_le32(sg->length);
		(*ents)[si].padding = 0;
	}
	return 0;
}
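
/*
 * Create a virtio-gpu resource: allocate the shmem GEM object and a
 * host resource ID, submit the CREATE_RESOURCE(_3D) command, then
 * attach the backing pages.  If @fence is non-NULL the create command
 * is fenced, so the caller can wait for the host to finish it.
 */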
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;
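
	/*
	 * For fenced creation the object is placed in an object array
	 * and its reservation object is locked, so the fence can be
	 * attached when the create command is queued.
	 */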
	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_notify(vgdev);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}