// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem DMA helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: dma helpers
 *
 * The DRM GEM/DMA helpers are a means to provide buffer objects that are
 * presented to the device as a contiguous chunk of memory. This is useful
 * for devices that do not support scatter-gather DMA (either directly or
 * by using an intimately attached IOMMU).
 *
 * For devices that access the memory bus through an (external) IOMMU, the
 * buffer objects are allocated using a traditional page-based allocator
 * and may be scattered through physical memory. However, they are
 * contiguous in the IOVA space, so they appear contiguous to devices
 * using them.
 *
 * For other devices, the helpers rely on CMA to provide buffer objects
 * that are physically contiguous in memory.
 *
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
 * drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
 */
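
/*
 * For illustration, a minimal sketch of how a driver typically wires these
 * helpers up (the "foo" driver name and feature flags are hypothetical; the
 * macros come from <drm/drm_gem_dma_helper.h>):
 *
 *	DEFINE_DRM_GEM_DMA_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.fops = &foo_fops,
 *		DRM_GEM_DMA_DRIVER_OPS,
 *	};
 */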

static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
	.free = drm_gem_dma_object_free,
	.print_info = drm_gem_dma_object_print_info,
	.get_sg_table = drm_gem_dma_object_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = drm_gem_dma_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};

/**
 * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM DMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object) {
		gem_obj = drm->driver->gem_create_object(drm, size);
		if (IS_ERR(gem_obj))
			return ERR_CAST(gem_obj);
		dma_obj = to_drm_gem_dma_obj(gem_obj);
	} else {
		dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
		if (!dma_obj)
			return ERR_PTR(-ENOMEM);
		gem_obj = &dma_obj->base;
	}

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_dma_default_funcs;

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		dma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return dma_obj;

error:
	kfree(dma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_dma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a DMA GEM object and allocates memory as backing store.
 * The allocated memory will occupy a contiguous chunk of bus address space.
 *
 * For devices that are directly connected to the memory bus, the allocated
 * memory will be physically contiguous. For devices that access memory
 * through an IOMMU, the allocated memory is not expected to be physically
 * contiguous, because contiguous IOVAs are sufficient to meet a device's
 * DMA requirements.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_dma_object *dma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	dma_obj = __drm_gem_dma_create(drm, size, false);
	if (IS_ERR(dma_obj))
		return dma_obj;

	if (dma_obj->map_noncoherent) {
		dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
						       &dma_obj->dma_addr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
					      &dma_obj->dma_addr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!dma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return dma_obj;

error:
	drm_gem_object_put(&dma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_create);
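
/*
 * A minimal usage sketch, assuming a caller that wants a 4 MiB buffer (the
 * size and error handling are illustrative only). On success, vaddr holds
 * the kernel mapping and dma_addr the device-visible address; the buffer
 * is released by dropping the final GEM reference:
 *
 *	struct drm_gem_dma_object *dma_obj;
 *
 *	dma_obj = drm_gem_dma_create(drm, 4 * 1024 * 1024);
 *	if (IS_ERR(dma_obj))
 *		return PTR_ERR(dma_obj);
 *
 *	drm_gem_object_put(&dma_obj->base);
 */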

/**
 * drm_gem_dma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a DMA GEM object, allocating a chunk of memory as
 * backing store. The GEM object is then added to the list of objects
 * associated with the given file and a handle to it is returned.
 *
 * The allocated memory will occupy a contiguous chunk of bus address space.
 * See drm_gem_dma_create() for more details.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
drm_gem_dma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	dma_obj = drm_gem_dma_create(drm, size);
	if (IS_ERR(dma_obj))
		return dma_obj;

	gem_obj = &dma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle holds the ID that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return dma_obj;
}

/**
 * drm_gem_dma_free - free resources associated with a DMA GEM object
 * @dma_obj: DMA GEM object to free
 *
 * This function frees the backing memory of the DMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
	struct drm_gem_object *gem_obj = &dma_obj->base;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);

	if (gem_obj->import_attach) {
		if (dma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
	} else if (dma_obj->vaddr) {
		if (dma_obj->map_noncoherent)
			dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
					     dma_obj->vaddr, dma_obj->dma_addr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
				    dma_obj->vaddr, dma_obj->dma_addr);
	}

	drm_gem_object_release(gem_obj);

	kfree(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_free);

/**
 * drm_gem_dma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_dma_object *dma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
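
/*
 * A sketch of the wrapping pattern described above, assuming hypothetical
 * hardware that requires the pitch to be aligned to 128 bytes (the driver
 * name and alignment value are made up for illustration):
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *drm,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 128);
 *
 *		return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
 *	}
 */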

/**
 * drm_gem_dma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_dma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_dma_object *dma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);

const struct vm_operations_struct drm_gem_dma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	dma_obj = to_drm_gem_dma_obj(obj);

	drm_gem_object_put(obj);

	return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
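
/*
 * A sketch of the intended hookup on noMMU platforms, with a hypothetical
 * "foo" driver; DEFINE_DRM_GEM_DMA_FOPS() is expected to install this
 * handler automatically when CONFIG_MMU is disabled, so spelling it out is
 * only needed for hand-rolled &file_operations:
 *
 *	static const struct file_operations foo_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *		.get_unmapped_area = drm_gem_dma_get_unmapped_area,
 *	};
 */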
#endif

/**
 * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
 * @dma_obj: DMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints dma_addr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
			    struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
	drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_dma_print_info);

/**
 * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a DMA GEM object
 * @dma_obj: DMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
	struct drm_gem_object *obj = &dma_obj->base;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
			      dma_obj->dma_addr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);

/**
 * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the DMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_dma_object *dma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a DMA GEM buffer. */
	dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(dma_obj))
		return ERR_CAST(dma_obj);

	dma_obj->dma_addr = sg_dma_address(sgt->sgl);
	dma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
			attach->dmabuf->size);

	return &dma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);
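
/*
 * As the kernel-doc above notes, drivers select this import path through
 * &drm_driver; a hypothetical driver would set (this assignment is also
 * covered by the DRM_GEM_DMA_DRIVER_OPS macro):
 *
 *	static const struct drm_driver foo_driver = {
 *		.gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table,
 *	};
 */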

/**
 * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
 *     address space
 * @dma_obj: DMA GEM object
 * @map: Returns the kernel virtual address of the DMA GEM object's backing
 *     store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the DMA buffers are already mapped into the kernel virtual address
 * space this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
		     struct iosys_map *map)
{
	iosys_map_set_vaddr(map, dma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);

/**
 * drm_gem_dma_mmap - memory-map an exported DMA GEM object
 * @dma_obj: DMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &dma_obj->base;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_DONTEXPAND;

	if (dma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(dma_obj->base.dev->dev,
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(dma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
				  dma_obj->dma_addr,
				  vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);
2020-11-23 14:56:46 +03:00
2018-11-10 17:56:46 +03:00
/**
2022-08-02 03:04:03 +03:00
* drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver ' s
2018-11-10 17:56:46 +03:00
* scatter / gather table and get the virtual address of the buffer
* @ dev : DRM device
* @ attach : DMA - BUF attachment
* @ sgt : Scatter / gather table of pinned pages
*
* This function imports a scatter / gather table using
2022-08-02 03:04:03 +03:00
* drm_gem_dma_prime_import_sg_table ( ) and uses dma_buf_vmap ( ) to get the kernel
* virtual address . This ensures that a DMA GEM object always has its virtual
2018-11-10 17:56:46 +03:00
* address set . This address is released when the object is freed .
*
* This function can be used as the & drm_driver . gem_prime_import_sg_table
2022-08-02 03:04:03 +03:00
* callback . The & DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
2018-11-10 17:56:46 +03:00
* the necessary DRM driver operations .
*
* Returns :
* A pointer to a newly created GEM object or an ERR_PTR - encoded negative
* error code on failure .
*/
struct drm_gem_object *
2022-08-02 03:04:03 +03:00
drm_gem_dma_prime_import_sg_table_vmap ( struct drm_device * dev ,
2018-11-10 17:56:46 +03:00
struct dma_buf_attachment * attach ,
struct sg_table * sgt )
{
2022-08-02 03:04:03 +03:00
struct drm_gem_dma_object * dma_obj ;
2018-11-10 17:56:46 +03:00
struct drm_gem_object * obj ;
2022-02-04 20:05:41 +03:00
struct iosys_map map ;
2020-09-25 14:55:59 +03:00
int ret ;
2018-11-10 17:56:46 +03:00
2020-09-25 14:55:59 +03:00
ret = dma_buf_vmap ( attach - > dmabuf , & map ) ;
if ( ret ) {
2018-11-10 17:56:46 +03:00
DRM_ERROR ( " Failed to vmap PRIME buffer \n " ) ;
2020-09-25 14:55:59 +03:00
return ERR_PTR ( ret ) ;
2018-11-10 17:56:46 +03:00
}
2022-08-02 03:04:03 +03:00
obj = drm_gem_dma_prime_import_sg_table ( dev , attach , sgt ) ;
2018-11-10 17:56:46 +03:00
if ( IS_ERR ( obj ) ) {
2020-09-25 14:56:00 +03:00
dma_buf_vunmap ( attach - > dmabuf , & map ) ;
2018-11-10 17:56:46 +03:00
return obj ;
}
2022-08-02 03:04:03 +03:00
dma_obj = to_drm_gem_dma_obj ( obj ) ;
dma_obj - > vaddr = map . vaddr ;
2018-11-10 17:56:46 +03:00
return obj ;
}
2022-08-02 03:04:03 +03:00
EXPORT_SYMBOL ( drm_gem_dma_prime_import_sg_table_vmap ) ;
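
/*
 * A sketch of the shortcut mentioned above: DRM_GEM_DMA_DRIVER_OPS_VMAP
 * selects drm_gem_dma_prime_import_sg_table_vmap() as the import callback
 * (driver name and feature flags are hypothetical):
 *
 *	static const struct drm_driver foo_vmap_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_DMA_DRIVER_OPS_VMAP,
 *	};
 */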

MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");