// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
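/*
 * A minimal sketch of how a driver can wire up these helpers; the "foo"
 * names are hypothetical and the field set is illustrative, not exhaustive:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create		= drm_gem_shmem_dumb_create,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *	};
 *
 * Objects created through these entry points use &drm_gem_shmem_funcs below
 * unless the driver provides its own functions via
 * &drm_driver.gem_create_object.
 */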
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);
	/*
	 * Our buffers are kept pinned, so allocating them
	 * from the MOVABLE zone is a really bad idea, and
	 * conflicts with CMA. See comments above new_inode()
	 * for why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

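/*
 * Typical use from a driver's buffer-allocation path, as a sketch (the
 * "args" variable is hypothetical):
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, args->size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */
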
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		shmem->pages_use_count--;
		drm_prime_gem_destroy(obj, shmem->sgt);
		kvfree(shmem->pages);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach)
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	else
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a kernel virtual address exists for the
 * buffer backing the shmem GEM object.
 *
 * Returns:
 * The kernel virtual address of the backing memory or an ERR_PTR()-encoded
 * negative error code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

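/*
 * CPU access through these helpers, as a sketch (every successful vmap must
 * be paired with a vunmap; "data" and "len" are hypothetical):
 *
 *	void *vaddr = drm_gem_shmem_vmap(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	drm_gem_shmem_vunmap(obj, vaddr);
 */
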
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping to remove
 *
 * This function removes the virtual address when the use count drops to zero.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID from the IDR table where the object is registered;
	 * the handle carries the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/*
 * Update the madvise status. Returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

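/*
 * Sketch of how a driver's madvise ioctl might use the helper; the
 * "shrinker_list" and FOO_MADV_DONTNEED names are hypothetical, driver-side
 * constructs:
 *
 *	args->retained = drm_gem_shmem_madvise(obj, args->madv);
 *	if (args->retained && args->madv == FOO_MADV_DONTNEED)
 *		list_add_tail(&shmem->madv_list, &foo->shrinker_list);
 */
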
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/*
	 * Our goal here is to return as much memory as possible back to the
	 * system, since we are called from OOM. To do this we must instruct
	 * the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

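/*
 * Sketch of a shrinker scan loop built on drm_gem_shmem_purge(); the
 * driver-side "shrinker_list" is a hypothetical name. The trylock inside
 * drm_gem_shmem_purge() lets a shrinker invoked from reclaim back off
 * instead of deadlocking against a thread that already holds pages_lock:
 *
 *	list_for_each_entry(shmem, &foo->shrinker_list, madv_list) {
 *		if (freed >= sc->nr_to_scan)
 *			break;
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */
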
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

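/*
 * Worked example of the computation above: for a 1024x768 dumb buffer at
 * 32 bpp with no pitch or size supplied by userspace,
 * min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes and
 * args->size = 4096 * 768 = 3145728 bytes; drm_gem_shmem_create() then
 * page-aligns the size.
 */
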
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *				pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist yet, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	shmem->sgt = sgt;

	return sgt;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

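/*
 * Sketch of device-mapping use from a driver's MMU map path; foo_mmu_map()
 * and "iova" are hypothetical driver-side names:
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	struct scatterlist *sgl;
 *	unsigned int i;
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sg(sgt->sgl, sgl, sgt->nents, i)
 *		foo_mmu_map(iova, sg_dma_address(sgl), sg_dma_len(sgl));
 */
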
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *					 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	size_t npages = size >> PAGE_SHIFT;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!shmem->pages) {
		ret = -ENOMEM;
		goto err_free_gem;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
	if (ret < 0)
		goto err_free_array;

	shmem->sgt = sgt;
	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;

err_free_array:
	kvfree(shmem->pages);
err_free_gem:
	drm_gem_object_put_unlocked(&shmem->base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);