/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>

/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM BO
 *
 * Sets up an in-kernel virtual mapping of the BO's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM BO
 * @vaddr: Virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the BO's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
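
/**
 * __dma_resv_make_exclusive - convert all shared fences into an exclusive one
 * @obj: reservation object to convert
 *
 * Importers only honour the exclusive fence for implicit write-hazard
 * tracking, so fold every shared fence on @obj into a single exclusive
 * fence (see amdgpu_dma_buf_attach() below).
 */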
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;
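
	/* Attachments from other amdgpu devices understand our shared fences,
	 * so no exclusive-fence conversion is needed for them.
	 */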
	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the dmabuf rely on exclusive fences for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, we need to convert all existing
	 * fences on the reservation object into a single exclusive
	 * fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r)
		return r;

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;
}

/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct sg_table *sgt;
	long r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return ERR_PTR(r);

	sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
	if (IS_ERR(sgt))
		return sgt;

	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC))
		goto error_free;

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-ENOMEM);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
	amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}
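
/*
 * With dynamic_mapping set, the DMA-buf core calls map_dma_buf/unmap_dma_buf
 * with the buffer's reservation lock already held.
 */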
const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.dynamic_mapping = true,
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;
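
	/* Userptr BOs and per-VM BOs are tied to a single process or VM and
	 * must not be shared with other devices.
	 */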
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = dma_buf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	dma_resv_lock(resv, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
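
	/* Mark buffers imported from other drivers as prime shared right away,
	 * so that writes to them sync with the foreign device via exclusive
	 * fences.
	 */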
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return &bo->tbo.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}