/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

/*
 * get_uuid() hook for the exported dma-buf: wait until the host has
 * finished (or failed) assigning a UUID to the resource, then copy it out.
 */
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state != STATE_OK)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}
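
/*
 * VRAM-backed objects need the VRAM-specific map/unmap helpers;
 * everything else falls back to the generic GEM dma-buf paths.
 */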
static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo))
		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);

	return drm_gem_map_dma_buf(attach, dir);
}

static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo)) {
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
		return;
	}

	drm_gem_unmap_dma_buf(attach, sgt, dir);
}
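
/*
 * Exported dma-bufs use the standard DRM GEM prime helpers, wrapped in
 * virtio_dma_buf_ops so importers can also query the resource UUID.
 */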
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

/*
 * Ask the host to assign a UUID to the object.  virtgpu_virtio_get_uuid()
 * above waits for the result via bo->uuid_state.
 */
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}
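
/*
 * Export a virtio-gpu BO as a dma-buf.  Non-blob resources get a
 * host-assigned UUID first (when the host supports it); blob resources
 * only get a UUID if they were created with
 * VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE.
 */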
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}
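
/* Reuse our own GEM objects on self-import; defer everything else to DRM. */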
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct drm_gem_object *obj;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, buf);
}
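
/* Importing foreign buffers via an sg-table is not supported. */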
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}