// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The DRM GEM/CMA helpers are a means to provide buffer objects that are
 * presented to the device as a contiguous chunk of memory. This is useful
 * for devices that do not support scatter-gather DMA (either directly or
 * by using an intimately attached IOMMU).
 *
 * Despite the name, the DRM GEM/CMA helpers are not hardwired to use the
 * Contiguous Memory Allocator (CMA).
 *
 * For devices that access the memory bus through an (external) IOMMU, the
 * buffer objects are allocated using a traditional page-based allocator and
 * may be scattered through physical memory. However, they are contiguous
 * in the IOVA space, so they appear contiguous to devices using them.
 *
 * For other devices, the helpers rely on CMA to provide buffer objects
 * that are physically contiguous in memory.
 *
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
 * drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
 */
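
/*
 * A minimal sketch (not taken from a specific driver; the foo_ names and the
 * feature flags are illustrative, only DRM_GEM_CMA_DRIVER_OPS comes from the
 * CMA helper API) of how a driver typically hooks up these helpers:
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_CMA_DRIVER_OPS,
 *		...
 *	};
 */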

static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
	.free = drm_gem_cma_object_free,
	.print_info = drm_gem_cma_object_print_info,
	.get_sg_table = drm_gem_cma_object_get_sg_table,
	.vmap = drm_gem_cma_object_vmap,
	.mmap = drm_gem_cma_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object) {
		gem_obj = drm->driver->gem_create_object(drm, size);
		if (IS_ERR(gem_obj))
			return ERR_CAST(gem_obj);
		cma_obj = to_drm_gem_cma_obj(gem_obj);
	} else {
		cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
		if (!cma_obj)
			return ERR_PTR(-ENOMEM);
		gem_obj = &cma_obj->base;
	}

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_cma_default_funcs;

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		cma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates memory as backing store.
 * The allocated memory will occupy a contiguous chunk of bus address space.
 *
 * For devices that are directly connected to the memory bus, the allocated
 * memory will be physically contiguous. For devices that access the bus
 * through an IOMMU, the allocated memory is not expected to be physically
 * contiguous, because contiguous IOVAs are sufficient to meet the device's
 * DMA requirements.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size, false);
	if (IS_ERR(cma_obj))
		return cma_obj;

	if (cma_obj->map_noncoherent) {
		cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
						       &cma_obj->paddr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!cma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
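
/*
 * A minimal usage sketch (hypothetical driver code: fb_size, priv and
 * foo_hw_set_scanout_base() are made up) showing how a driver might allocate
 * a contiguous scanout buffer and hand its bus address to the hardware:
 *
 *	struct drm_gem_cma_object *cma_obj;
 *
 *	cma_obj = drm_gem_cma_create(drm, fb_size);
 *	if (IS_ERR(cma_obj))
 *		return PTR_ERR(cma_obj);
 *
 *	foo_hw_set_scanout_base(priv, cma_obj->paddr);
 */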

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a chunk of memory as
 * backing store. The GEM object is then added to the list of objects
 * associated with the given file and a handle to it is returned.
 *
 * The allocated memory will occupy a contiguous chunk of bus address space.
 * See drm_gem_cma_create() for more details.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle carries the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}

/**
 * drm_gem_cma_free - free resources associated with a CMA GEM object
 * @cma_obj: CMA GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *gem_obj = &cma_obj->base;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr);

	if (gem_obj->import_attach) {
		if (cma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	} else if (cma_obj->vaddr) {
		if (cma_obj->map_noncoherent)
			dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
					     cma_obj->vaddr, cma_obj->paddr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
				    cma_obj->vaddr, cma_obj->paddr);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
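
/*
 * An illustrative sketch (the 64-byte pitch requirement and the foo_ name are
 * made up) of a driver wrapping this helper to satisfy a stricter hardware
 * pitch alignment before handing off to the CMA helper:
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *drm,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *
 *		return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *	}
 */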

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
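
/*
 * A hedged sketch (the foo_fops name and the surrounding fields are made up)
 * of how a noMMU driver could wire the handler above into its file
 * operations, as the kernel-doc above suggests:
 *
 *	static const struct file_operations foo_fops = {
 *		.owner = THIS_MODULE,
 *		.mmap = drm_gem_mmap,
 *	#ifndef CONFIG_MMU
 *		.get_unmapped_area = drm_gem_cma_get_unmapped_area,
 *	#endif
 *		...
 *	};
 */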

/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @cma_obj: CMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints paddr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
			    struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);

/**
 * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @cma_obj: CMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *obj = &cma_obj->base;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @cma_obj: CMA GEM object
 * @map: Returns the kernel virtual address of the CMA GEM object's backing
 *       store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the CMA buffers are already mapped into the kernel virtual address
 * space this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
		     struct iosys_map *map)
{
	iosys_map_set_vaddr(map, cma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);

/**
 * drm_gem_cma_mmap - memory-map an exported CMA GEM object
 * @cma_obj: CMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &cma_obj->base;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_DONTEXPAND;

	if (cma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(cma_obj->base.dev->dev,
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(cma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
				  cma_obj->paddr, vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

/**
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
 *	scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a CMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(attach->dmabuf, &map);
	if (ret) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(ret);
	}

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap(attach->dmabuf, &map);
		return obj;
	}

	cma_obj = to_drm_gem_cma_obj(obj);
	cma_obj->vaddr = map.vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
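
/*
 * A brief sketch (the foo_ name is hypothetical) contrasting the two driver
 * setups mentioned above: DRM_GEM_CMA_DRIVER_OPS uses the plain import helper,
 * while DRM_GEM_CMA_DRIVER_OPS_VMAP selects
 * drm_gem_cma_prime_import_sg_table_vmap() so that imported buffers always
 * carry a kernel virtual address:
 *
 *	static const struct drm_driver foo_vmap_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_CMA_DRIVER_OPS_VMAP,
 *		...
 *	};
 */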

MODULE_DESCRIPTION("DRM CMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");