// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/dma-buf-map.h>
#include <linux/module.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>

#include <drm/ttm/ttm_range_manager.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
2019-05-08 10:26:11 +02:00
/**
* DOC : overview
*
2020-03-31 10:12:38 +02:00
* This library provides & struct drm_gem_vram_object ( GEM VRAM ) , a GEM
* buffer object that is backed by video RAM ( VRAM ) . It can be used for
* framebuffer devices with dedicated memory .
2019-09-11 13:09:07 +02:00
*
* The data structure & struct drm_vram_mm and its helpers implement a memory
2020-03-31 10:12:38 +02:00
* manager for simple framebuffer devices with dedicated video memory . GEM
* VRAM buffer objects are either placed in the video memory or remain evicted
* to system memory .
*
* With the GEM interface userspace applications create , manage and destroy
* graphics buffers , such as an on - screen framebuffer . GEM does not provide
* an implementation of these interfaces . It ' s up to the DRM driver to
* provide an implementation that suits the hardware . If the hardware device
* contains dedicated video memory , the DRM driver can use the VRAM helper
* library . Each active buffer object is stored in video RAM . Active
* buffer are used for drawing the current frame , typically something like
* the frame ' s scanout buffer or the cursor image . If there ' s no more space
* left in VRAM , inactive GEM objects can be moved to system memory .
*
2020-07-16 14:53:48 +02:00
* To initialize the VRAM helper library call drmm_vram_helper_alloc_mm ( ) .
* The function allocates and initializes an instance of & struct drm_vram_mm
* in & struct drm_device . vram_mm . Use & DRM_GEM_VRAM_DRIVER to initialize
* & struct drm_driver and & DRM_VRAM_MM_FILE_OPERATIONS to initialize
* & struct file_operations ; as illustrated below .
2020-03-31 10:12:38 +02:00
*
* . . code - block : : c
*
* struct file_operations fops = {
* . owner = THIS_MODULE ,
* DRM_VRAM_MM_FILE_OPERATION
* } ;
* struct drm_driver drv = {
* . driver_feature = DRM_ . . . ,
* . fops = & fops ,
* DRM_GEM_VRAM_DRIVER
* } ;
*
* int init_drm_driver ( )
* {
* struct drm_device * dev ;
* uint64_t vram_base ;
* unsigned long vram_size ;
* int ret ;
*
* // setup device, vram base and size
* // ...
*
2020-07-16 14:53:48 +02:00
* ret = drmm_vram_helper_alloc_mm ( dev , vram_base , vram_size ) ;
2020-03-31 10:12:38 +02:00
* if ( ret )
* return ret ;
* return 0 ;
* }
*
* This creates an instance of & struct drm_vram_mm , exports DRM userspace
* interfaces for GEM buffer management and initializes file operations to
* allow for accessing created GEM buffers . With this setup , the DRM driver
* manages an area of video RAM with VRAM MM and provides GEM VRAM objects
* to userspace .
*
2020-07-16 14:53:48 +02:00
* You don ' t have to clean up the instance of VRAM MM .
* drmm_vram_helper_alloc_mm ( ) is a managed interface that installs a
* clean - up handler to run during the DRM device ' s release .
2020-03-31 10:12:38 +02:00
*
2020-07-16 14:53:48 +02:00
* For drawing or scanout operations , rsp . buffer objects have to be pinned
* in video RAM . Call drm_gem_vram_pin ( ) with & DRM_GEM_VRAM_PL_FLAG_VRAM or
2020-03-31 10:12:38 +02:00
* & DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
* memory . Call drm_gem_vram_unpin ( ) to release the pinned object afterwards .
*
* A buffer object that is pinned in video RAM has a fixed address within that
* memory region . Call drm_gem_vram_offset ( ) to retrieve this value . Typically
* it ' s used to program the hardware ' s scanout engine for framebuffers , set
* the cursor overlay ' s image for a mouse cursor , or use it as input to the
* hardware ' s draing engine .
*
* To access a buffer object ' s memory from the DRM driver , call
2020-09-11 09:59:22 +02:00
* drm_gem_vram_vmap ( ) . It maps the buffer into kernel address
* space and returns the memory address . Use drm_gem_vram_vunmap ( ) to
2020-03-31 10:12:38 +02:00
* release the mapping .
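 *
 * As a sketch (error handling shortened, and the buffer size is an
 * assumed example value), creating, pinning and mapping a buffer object
 * with these helpers looks as follows:
 *
 * .. code-block:: c
 *
 *      struct drm_gem_vram_object *gbo;
 *      struct dma_buf_map map;
 *      s64 offset;
 *      int ret;
 *
 *      gbo = drm_gem_vram_create(dev, PAGE_SIZE, 0);
 *      if (IS_ERR(gbo))
 *              return PTR_ERR(gbo);
 *
 *      // pin to VRAM and retrieve the fixed address for scanout
 *      ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
 *      offset = drm_gem_vram_offset(gbo);
 *
 *      // map for CPU access; check map.is_iomem before using map.vaddr
 *      ret = drm_gem_vram_vmap(gbo, &map);
 *
 *      // release in reverse order
 *      drm_gem_vram_vunmap(gbo, &map);
 *      drm_gem_vram_unpin(gbo);
 *      drm_gem_vram_put(gbo);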
 */
/*
 * Buffer-object helpers
 */

static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
        /* We got here via ttm_bo_put(), which means that the
         * TTM buffer object in 'bo' has already been cleaned
         * up; only release the GEM object.
         */

        WARN_ON(gbo->vmap_use_count);
        WARN_ON(dma_buf_map_is_set(&gbo->map));

        drm_gem_object_release(&gbo->bo.base);
}

static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
        drm_gem_vram_cleanup(gbo);
        kfree(gbo);
}

static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

        drm_gem_vram_destroy(gbo);
}
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
                                   unsigned long pl_flag)
{
        u32 invariant_flags = 0;
        unsigned int i;
        unsigned int c = 0;

        if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
                invariant_flags = TTM_PL_FLAG_TOPDOWN;

        gbo->placement.placement = gbo->placements;
        gbo->placement.busy_placement = gbo->placements;

        if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
                gbo->placements[c].mem_type = TTM_PL_VRAM;
                gbo->placements[c++].flags = invariant_flags;
        }

        if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
                gbo->placements[c].mem_type = TTM_PL_SYSTEM;
                gbo->placements[c++].flags = invariant_flags;
        }

        gbo->placement.num_placement = c;
        gbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                gbo->placements[i].fpfn = 0;
                gbo->placements[i].lpfn = 0;
        }
}
/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:		the DRM device
 * @size:		the buffer size in bytes
 * @pg_align:		the buffer's alignment in multiples of the page size
 *
 * GEM objects are allocated by calling struct drm_driver.gem_create_object,
 * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
 * object functions in struct drm_driver.gem_create_object. If no functions
 * are set, the new GEM object will use the default functions from GEM VRAM
 * helpers.
 *
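 * A driver that embeds &struct drm_gem_vram_object in a larger structure
 * can hook the allocation; a minimal sketch, with my_bo and
 * my_gem_create_object being hypothetical driver names:
 *
 * .. code-block:: c
 *
 *      struct my_bo {
 *              struct drm_gem_vram_object gbo;
 *              // driver-private state
 *      };
 *
 *      static struct drm_gem_object *
 *      my_gem_create_object(struct drm_device *dev, size_t size)
 *      {
 *              struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *              if (!bo)
 *                      return NULL;
 *              return &bo->gbo.bo.base;
 *      }
 *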
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						size_t size,
						unsigned long pg_align)
{
        struct drm_gem_vram_object *gbo;
        struct drm_gem_object *gem;
        struct drm_vram_mm *vmm = dev->vram_mm;
        struct ttm_device *bdev;
        int ret;

        if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
                return ERR_PTR(-EINVAL);

        if (dev->driver->gem_create_object) {
                gem = dev->driver->gem_create_object(dev, size);
                if (!gem)
                        return ERR_PTR(-ENOMEM);
                gbo = drm_gem_vram_of_gem(gem);
        } else {
                gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
                if (!gbo)
                        return ERR_PTR(-ENOMEM);
                gem = &gbo->bo.base;
        }

        if (!gem->funcs)
                gem->funcs = &drm_gem_vram_object_funcs;

        ret = drm_gem_object_init(dev, gem, size);
        if (ret) {
                kfree(gbo);
                return ERR_PTR(ret);
        }

        bdev = &vmm->bdev;

        gbo->bo.bdev = bdev;
        drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);

        /*
         * A failing ttm_bo_init will call ttm_buffer_object_destroy
         * to release gbo->bo.base and kfree gbo.
         */
        ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
                          &gbo->placement, pg_align, false, NULL, NULL,
                          ttm_buffer_object_destroy);
        if (ret)
                return ERR_PTR(ret);

        return gbo;
}
EXPORT_SYMBOL(drm_gem_vram_create);
/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:	the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
        ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
        /* Keep TTM behavior for now, remove when drivers are audited */
        if (WARN_ON_ONCE(!gbo->bo.resource ||
                         gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
                return 0;

        return gbo->bo.resource->start;
}
/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo:	the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
        if (WARN_ON_ONCE(!gbo->bo.pin_count))
                return (s64)-ENODEV;
        return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
                                   unsigned long pl_flag)
{
        struct ttm_operation_ctx ctx = { false, false };
        int ret;

        if (gbo->bo.pin_count)
                goto out;

        if (pl_flag)
                drm_gem_vram_placement(gbo, pl_flag);

        ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
        if (ret < 0)
                return ret;

out:
        ttm_bo_pin(&gbo->bo);

        return 0;
}
/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo:	the GEM VRAM object
 * @pl_flag:	a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Small buffer objects, such as cursor images, can lead to memory
 * fragmentation if they are pinned in the middle of video RAM. This
 * is especially a problem on devices with only a small amount of
 * video RAM. Fragmentation can prevent the primary framebuffer from
 * fitting in, even though there's enough memory overall. The modifier
 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
 * at the high end of the memory region to avoid fragmentation.
 *
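 * As a sketch, pinning a small cursor buffer at the high end of VRAM
 * (assuming gbo refers to the cursor's buffer object):
 *
 * .. code-block:: c
 *
 *      ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
 *                                  DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
 *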
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;
        ret = drm_gem_vram_pin_locked(gbo, pl_flag);
        ttm_bo_unreserve(&gbo->bo);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);

static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
        ttm_bo_unpin(&gbo->bo);
}

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo:	the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;

        drm_gem_vram_unpin_locked(gbo);
        ttm_bo_unreserve(&gbo->bo);

        return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
                                    struct dma_buf_map *map)
{
        int ret;

        if (gbo->vmap_use_count > 0)
                goto out;

        /*
         * VRAM helpers unmap the BO only on demand. So the previous
         * page mapping might still be around. Only vmap if there's
         * no mapping present.
         */
        if (dma_buf_map_is_null(&gbo->map)) {
                ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
                if (ret)
                        return ret;
        }

out:
        ++gbo->vmap_use_count;
        *map = gbo->map;

        return 0;
}
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
                                       struct dma_buf_map *map)
{
        struct drm_device *dev = gbo->bo.base.dev;

        if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
                return;

        if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map)))
                return; /* BUG: map not mapped from this BO */

        if (--gbo->vmap_use_count > 0)
                return;

        /*
         * Permanently mapping and unmapping buffers adds overhead from
         * updating the page tables and creates debugging output. Therefore,
         * we delay the actual unmap operation until the BO gets evicted
         * from memory. See drm_gem_vram_bo_driver_move_notify().
         */
}
/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
 *                       space
 * @gbo: The GEM VRAM object to map
 * @map: Returns the kernel virtual address of the VRAM GEM object's backing
 *       store.
 *
 * The vmap function pins a GEM VRAM object to its current location, either
 * system or video memory, and maps its buffer into kernel address space.
 * As pinned objects cannot be relocated, you should avoid pinning objects
 * permanently. Call drm_gem_vram_vunmap() with the returned address to
 * unmap and unpin the GEM VRAM object.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;

        ret = drm_gem_vram_pin_locked(gbo, 0);
        if (ret)
                goto err_ttm_bo_unreserve;
        ret = drm_gem_vram_kmap_locked(gbo, map);
        if (ret)
                goto err_drm_gem_vram_unpin_locked;

        ttm_bo_unreserve(&gbo->bo);

        return 0;

err_drm_gem_vram_unpin_locked:
        drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
        ttm_bo_unreserve(&gbo->bo);
        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_vmap);
/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo: The GEM VRAM object to unmap
 * @map: Kernel virtual address where the VRAM GEM object was mapped
 *
 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
 * the documentation for drm_gem_vram_vmap() for more information.
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
        if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
                return;

        drm_gem_vram_kunmap_locked(gbo, map);
        drm_gem_vram_unpin_locked(gbo);

        ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
/**
 * drm_gem_vram_fill_create_dumb() - \
	Helper for implementing &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @pitch_align:	the scanline's alignment in powers of 2
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
 * parameters.
 *
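 * A driver's &struct drm_driver.dumb_create implementation might, as a
 * sketch (the pitch alignment of 64 bytes is a hypothetical,
 * hardware-specific value), look like this:
 *
 * .. code-block:: c
 *
 *      static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *                                struct drm_mode_create_dumb *args)
 *      {
 *              return drm_gem_vram_fill_create_dumb(file, dev, 0, 64, args);
 *      }
 *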
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
                                  struct drm_device *dev,
                                  unsigned long pg_align,
                                  unsigned long pitch_align,
                                  struct drm_mode_create_dumb *args)
{
        size_t pitch, size;
        struct drm_gem_vram_object *gbo;
        int ret;
        u32 handle;

        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        if (pitch_align) {
                if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
                        return -EINVAL;
                pitch = ALIGN(pitch, pitch_align);
        }
        size = pitch * args->height;

        size = roundup(size, PAGE_SIZE);
        if (!size)
                return -EINVAL;

        gbo = drm_gem_vram_create(dev, size, pg_align);
        if (IS_ERR(gbo))
                return PTR_ERR(gbo);

        ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
        if (ret)
                goto err_drm_gem_object_put;

        drm_gem_object_put(&gbo->bo.base);

        args->pitch = pitch;
        args->size = size;
        args->handle = handle;

        return 0;

err_drm_gem_object_put:
        drm_gem_object_put(&gbo->bo.base);
        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
/*
 * Helpers for struct ttm_device_funcs
 */

static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
        return (bo->destroy == ttm_buffer_object_destroy);
}

static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
                                               struct ttm_placement *pl)
{
        drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
        *pl = gbo->placement;
}

static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo)
{
        struct ttm_buffer_object *bo = &gbo->bo;
        struct drm_device *dev = bo->base.dev;

        if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count))
                return;

        ttm_bo_vunmap(bo, &gbo->map);
        dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}

static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
                                       bool evict,
                                       struct ttm_operation_ctx *ctx,
                                       struct ttm_resource *new_mem)
{
        drm_gem_vram_bo_driver_move_notify(gbo);
        return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
}
/*
 * Helpers for struct drm_gem_object_funcs
 */

/**
 * drm_gem_vram_object_free() - \
	Implements &struct drm_gem_object_funcs.free
 * @gem:	GEM object. Refers to &struct drm_gem_vram_object.gem
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        drm_gem_vram_put(gbo);
}

/*
 * Helpers for dumb buffers
 */
/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This function requires the driver to use @drm_device.vram_mm for its
 * instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
                                    struct drm_device *dev,
                                    struct drm_mode_create_dumb *args)
{
        if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
                return -EINVAL;

        return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
/*
 * Helpers for struct drm_plane_helper_funcs
 */

/**
 * drm_gem_vram_plane_helper_prepare_fb() - \
 *	Implements &struct drm_plane_helper_funcs.prepare_fb
 * @plane:	a DRM plane
 * @new_state:	the plane's new state
 *
 * During plane updates, this function sets the plane's fence and
 * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
 * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
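 *
 * A driver typically wires both helpers into its plane's helper
 * functions. As a sketch (my_plane_helper_funcs and the atomic callbacks
 * are hypothetical, driver-specific names):
 *
 * .. code-block:: c
 *
 *      static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
 *              .atomic_check = my_plane_atomic_check,
 *              .atomic_update = my_plane_atomic_update,
 *              .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
 *              .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
 *      };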
*
 * Returns:
 *	0 on success, or
 *	a negative errno code otherwise.
 */
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
                                     struct drm_plane_state *new_state)
{
        size_t i;
        struct drm_gem_vram_object *gbo;
        int ret;

        if (!new_state->fb)
                return 0;

        for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
                if (!new_state->fb->obj[i])
                        continue;
                gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
                ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
                if (ret)
                        goto err_drm_gem_vram_unpin;
        }

        ret = drm_gem_plane_helper_prepare_fb(plane, new_state);
        if (ret)
                goto err_drm_gem_vram_unpin;

        return 0;

err_drm_gem_vram_unpin:
        while (i) {
                --i;
                gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
                drm_gem_vram_unpin(gbo);
        }
        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);

/**
 * drm_gem_vram_plane_helper_cleanup_fb() - \
 *	Implements &struct drm_plane_helper_funcs.cleanup_fb
 * @plane:	a DRM plane
 * @old_state:	the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_plane_helper_prepare_fb().
 */
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
{
        size_t i;
        struct drm_gem_vram_object *gbo;

        if (!old_state->fb)
                return;

        for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
                if (!old_state->fb->obj[i])
                        continue;
                gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
                drm_gem_vram_unpin(gbo);
        }
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
/*
 * Helpers for struct drm_simple_display_pipe_funcs
 */

/**
 * drm_gem_vram_simple_display_pipe_prepare_fb() - \
 *	Implements &struct drm_simple_display_pipe_funcs.prepare_fb
 * @pipe:	a simple display pipe
 * @new_state:	the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
 *
 * Returns:
 *	0 on success, or
 *	a negative errno code otherwise.
 */
int drm_gem_vram_simple_display_pipe_prepare_fb(
        struct drm_simple_display_pipe *pipe,
        struct drm_plane_state *new_state)
{
        return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);

/**
 * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
 *	Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
 * @pipe:	a simple display pipe
 * @old_state:	the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_simple_display_pipe_prepare_fb().
 */
void drm_gem_vram_simple_display_pipe_cleanup_fb(
        struct drm_simple_display_pipe *pipe,
        struct drm_plane_state *old_state)
{
        drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
/*
 * PRIME helpers
 */

/**
 * drm_gem_vram_object_pin() - \
	Implements &struct drm_gem_object_funcs.pin
 * @gem:	The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        /* Fbdev console emulation is the use case of these PRIME
         * helpers. This may involve updating a hardware buffer from
         * a shadow FB. We pin the buffer to its current location
         * (either video RAM or system memory) to prevent it from
         * being relocated during the update operation. If you require
         * the buffer to be pinned to VRAM, implement a callback that
         * sets the flags accordingly.
         */
        return drm_gem_vram_pin(gbo, 0);
}

/**
 * drm_gem_vram_object_unpin() - \
	Implements &struct drm_gem_object_funcs.unpin
 * @gem:	The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        drm_gem_vram_unpin(gbo);
}

/**
 * drm_gem_vram_object_vmap() -
 *	Implements &struct drm_gem_object_funcs.vmap
 * @gem: The GEM object to map
 * @map: Returns the kernel virtual address of the VRAM GEM object's backing
 *       store.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        return drm_gem_vram_vmap(gbo, map);
}

/**
 * drm_gem_vram_object_vunmap() -
 *	Implements &struct drm_gem_object_funcs.vunmap
 * @gem: The GEM object to unmap
 * @map: Kernel virtual address where the VRAM GEM object was mapped
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        drm_gem_vram_vunmap(gbo, map);
}
/*
 * GEM object funcs
 */

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
        .free   = drm_gem_vram_object_free,
        .pin    = drm_gem_vram_object_pin,
        .unpin  = drm_gem_vram_object_unpin,
        .vmap   = drm_gem_vram_object_vmap,
        .vunmap = drm_gem_vram_object_vunmap,
        .mmap   = drm_gem_ttm_mmap,
        .print_info = drm_gem_ttm_print_info,
};
/*
 * VRAM memory manager
 */

/*
 * TTM TT
 */

static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_destroy_common(bdev, tt);
        ttm_tt_fini(tt);
        kfree(tt);
}

/*
 * TTM BO device
 */

static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
                                              uint32_t page_flags)
{
        struct ttm_tt *tt;
        int ret;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
        if (ret < 0)
                goto err_ttm_tt_init;

        return tt;

err_ttm_tt_init:
        kfree(tt);
        return NULL;
}

static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
                                  struct ttm_placement *placement)
{
        struct drm_gem_vram_object *gbo;

        /* TTM may pass BOs that are not GEM VRAM BOs. */
        if (!drm_is_gem_vram(bo))
                return;

        gbo = drm_gem_vram_of_bo(bo);

        drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}

static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
{
        struct drm_gem_vram_object *gbo;

        /* TTM may pass BOs that are not GEM VRAM BOs. */
        if (!drm_is_gem_vram(bo))
                return;

        gbo = drm_gem_vram_of_bo(bo);

        drm_gem_vram_bo_driver_move_notify(gbo);
}

static int bo_driver_move(struct ttm_buffer_object *bo,
                          bool evict,
                          struct ttm_operation_ctx *ctx,
                          struct ttm_resource *new_mem,
                          struct ttm_place *hop)
{
        struct drm_gem_vram_object *gbo;

        gbo = drm_gem_vram_of_bo(bo);

        return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}

static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
                                    struct ttm_resource *mem)
{
        struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:     /* nothing to do */
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
                mem->bus.is_iomem = true;
                mem->bus.caching = ttm_write_combined;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct ttm_device_funcs bo_driver = {
        .ttm_tt_create = bo_driver_ttm_tt_create,
        .ttm_tt_destroy = bo_driver_ttm_tt_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = bo_driver_evict_flags,
        .move = bo_driver_move,
        .delete_mem_notify = bo_driver_delete_mem_notify,
        .io_mem_reserve = bo_driver_io_mem_reserve,
};
/*
 * struct drm_vram_mm
 */

static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
        struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
        struct drm_printer p = drm_seq_file_printer(m);

        ttm_resource_manager_debug(man, &p);
        return 0;
}

static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
        { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};

/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 *
 * @minor: drm minor device.
 *
 */
void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
        drm_debugfs_create_files(drm_vram_mm_debugfs_list,
                                 ARRAY_SIZE(drm_vram_mm_debugfs_list),
                                 minor->debugfs_root, minor);
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
                            uint64_t vram_base, size_t vram_size)
{
        int ret;

        vmm->vram_base = vram_base;
        vmm->vram_size = vram_size;

        ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
                              dev->anon_inode->i_mapping,
                              dev->vma_offset_manager,
                              false, true);
        if (ret)
                return ret;

        ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
                                 false, vram_size >> PAGE_SHIFT);
        if (ret)
                return ret;

        return 0;
}

static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
        ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
        ttm_device_fini(&vmm->bdev);
}

/*
 * Helpers for integration with struct drm_device
 */

/* deprecated; use drmm_vram_helper_init() */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
        struct drm_device *dev, uint64_t vram_base, size_t vram_size)
{
        int ret;

        if (WARN_ON(dev->vram_mm))
                return dev->vram_mm;

        dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
        if (!dev->vram_mm)
                return ERR_PTR(-ENOMEM);

        ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
        if (ret)
                goto err_kfree;

        return dev->vram_mm;

err_kfree:
        kfree(dev->vram_mm);
        dev->vram_mm = NULL;
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);

void drm_vram_helper_release_mm(struct drm_device *dev)
{
        if (!dev->vram_mm)
                return;

        drm_vram_mm_cleanup(dev->vram_mm);
        kfree(dev->vram_mm);
        dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);

static void drm_vram_mm_release(struct drm_device *dev, void *ptr)
{
        drm_vram_helper_release_mm(dev);
}

/**
 * drmm_vram_helper_init - Initializes a device's instance of
 *                         &struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 *
 * Creates a new instance of &struct drm_vram_mm and stores it in
 * struct &drm_device.vram_mm. The instance is auto-managed and cleaned
 * up as part of device cleanup. Calling this function multiple times
 * will generate an error message.
 *
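 * A sketch of a typical call site, assuming a PCI device whose BAR 0
 * exposes the video memory (the BAR number is a hypothetical,
 * hardware-specific choice):
 *
 * .. code-block:: c
 *
 *      ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
 *                                  pci_resource_len(pdev, 0));
 *      if (ret)
 *              return ret;
 *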
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
                          size_t vram_size)
{
        struct drm_vram_mm *vram_mm;

        if (drm_WARN_ON_ONCE(dev, dev->vram_mm))
                return 0;

        vram_mm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
        if (IS_ERR(vram_mm))
                return PTR_ERR(vram_mm);
        return drmm_add_action_or_reset(dev, drm_vram_mm_release, NULL);
}
EXPORT_SYMBOL(drmm_vram_helper_init);

/*
 * Mode-config helpers
 */

static enum drm_mode_status
drm_vram_helper_mode_valid_internal(struct drm_device *dev,
                                    const struct drm_display_mode *mode,
                                    unsigned long max_bpp)
{
        struct drm_vram_mm *vmm = dev->vram_mm;
        unsigned long fbsize, fbpages, max_fbpages;

        if (WARN_ON(!dev->vram_mm))
                return MODE_BAD;

        max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;

        fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
        fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);

        if (fbpages > max_fbpages)
                return MODE_MEM;

        return MODE_OK;
}

/**
 * drm_vram_helper_mode_valid - Tests if a display mode's
 *	framebuffer fits into the available video memory.
 * @dev:	the DRM device
 * @mode:	the mode to test
 *
 * This function tests if enough video memory is available for using the
 * specified display mode. Atomic modesetting requires importing the
 * designated framebuffer into video memory before evicting the active
 * one. Hence, any framebuffer may consume at most half of the available
 * VRAM. Display modes that require a larger framebuffer cannot be used,
 * even if the CRTC does support them. Each framebuffer is assumed to
 * have 32-bit color depth.
 *
 * Note:
 * The function can only test if the display mode is supported in
 * general. If there are too many framebuffers pinned to video memory,
 * a display mode may still not be usable in practice. The color depth of
 * 32-bit fits all current use cases. A more flexible test can be added
 * when necessary.
 *
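 * As a worked example with assumed numbers: a device with 16 MiB of VRAM
 * and 4 KiB pages may use at most (16 MiB / 2) >> PAGE_SHIFT = 2048 pages
 * per framebuffer. A 1920x1080 mode at 32-bit depth needs
 * DIV_ROUND_UP(1920 * 1080 * 4, 4096) = 2025 pages and is accepted, while
 * a 2560x1440 mode needs 3600 pages and is rejected with MODE_MEM.
 *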
 * Returns:
 * MODE_OK if the display mode is supported, or an error code of type
 * enum drm_mode_status otherwise.
 */
enum drm_mode_status
drm_vram_helper_mode_valid(struct drm_device *dev,
                           const struct drm_display_mode *mode)
{
        static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */

        return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
}
EXPORT_SYMBOL(drm_vram_helper_mode_valid);

MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
MODULE_LICENSE("GPL");