// SPDX-License-Identifier: GPL-2.0-or-later

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
/**
 * DOC: overview
 *
 * This library provides a GEM buffer object that is backed by video RAM
 * (VRAM). It can be used for framebuffer devices with dedicated memory.
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. Buffer
 * objects are either placed in video RAM or evicted to system memory. The
 * respective buffer object is provided by &struct drm_gem_vram_object.
 */
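
/*
 * A minimal usage sketch for a hypothetical driver with a PCI VRAM
 * aperture at BAR 0 (pdev and size are assumptions): set up the memory
 * manager once at probe time, then allocate buffer objects from it.
 *
 *      struct drm_vram_mm *vmm;
 *      struct drm_gem_vram_object *gbo;
 *
 *      vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
 *                                     pci_resource_len(pdev, 0));
 *      if (IS_ERR(vmm))
 *              return PTR_ERR(vmm);
 *
 *      gbo = drm_gem_vram_create(dev, size, 0);
 *      if (IS_ERR(gbo))
 *              return PTR_ERR(gbo);
 *
 * Call drm_vram_helper_release_mm() at driver removal to free the manager.
 */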

/*
 * Buffer-object helpers
 */
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
        /* We got here via ttm_bo_put(), which means that the
         * TTM buffer object in 'bo' has already been cleaned
         * up; only release the GEM object.
         */

        WARN_ON(gbo->kmap_use_count);
        WARN_ON(gbo->kmap.virtual);

        drm_gem_object_release(&gbo->bo.base);
}
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
        drm_gem_vram_cleanup(gbo);
        kfree(gbo);
}

static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

        drm_gem_vram_destroy(gbo);
}

static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
                                   unsigned long pl_flag)
{
        unsigned int i;
        unsigned int c = 0;
        u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;

        gbo->placement.placement = gbo->placements;
        gbo->placement.busy_placement = gbo->placements;

        if (pl_flag & TTM_PL_FLAG_VRAM)
                gbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM |
                                             invariant_flags;

        if (pl_flag & TTM_PL_FLAG_SYSTEM)
                gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM |
                                             invariant_flags;

        if (!c)
                gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM |
                                             invariant_flags;

        gbo->placement.num_placement = c;
        gbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                gbo->placements[i].fpfn = 0;
                gbo->placements[i].lpfn = 0;
        }
}
static int drm_gem_vram_init(struct drm_device *dev,
                             struct drm_gem_vram_object *gbo,
                             size_t size, unsigned long pg_align)
{
        struct drm_vram_mm *vmm = dev->vram_mm;
        struct ttm_bo_device *bdev;
        int ret;
        size_t acc_size;

        if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
                return -EINVAL;
        bdev = &vmm->bdev;

        gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

        ret = drm_gem_object_init(dev, &gbo->bo.base, size);
        if (ret)
                return ret;

        acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

        gbo->bo.bdev = bdev;
        drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
                          &gbo->placement, pg_align, false, acc_size,
                          NULL, NULL, ttm_buffer_object_destroy);
        if (ret)
                goto err_drm_gem_object_release;

        return 0;

err_drm_gem_object_release:
        drm_gem_object_release(&gbo->bo.base);
        return ret;
}
/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:        the DRM device
 * @size:       the buffer size in bytes
 * @pg_align:   the buffer's alignment in multiples of the page size
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
                                                size_t size,
                                                unsigned long pg_align)
{
        struct drm_gem_vram_object *gbo;
        int ret;

        if (dev->driver->gem_create_object) {
                struct drm_gem_object *gem =
                        dev->driver->gem_create_object(dev, size);
                if (!gem)
                        return ERR_PTR(-ENOMEM);
                gbo = drm_gem_vram_of_gem(gem);
        } else {
                gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
                if (!gbo)
                        return ERR_PTR(-ENOMEM);
        }

        ret = drm_gem_vram_init(dev, gbo, size, pg_align);
        if (ret < 0)
                goto err_kfree;

        return gbo;

err_kfree:
        kfree(gbo);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);
/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:        the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
        ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo:        the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
        return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
/**
 * drm_gem_vram_offset() - \
        Returns a GEM VRAM object's offset in video memory
 * @gbo:        the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
        if (WARN_ON_ONCE(!gbo->pin_count))
                return (s64)-ENODEV;
        return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
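
/*
 * A typical use of the offset is programming a scanout address after
 * pinning the buffer to VRAM. A sketch (write_reg() and scanout_reg are
 * hypothetical driver helpers, not part of this library):
 *
 *      s64 offset;
 *      int ret;
 *
 *      ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
 *      if (ret)
 *              return ret;
 *      offset = drm_gem_vram_offset(gbo);
 *      if (offset < 0) {
 *              drm_gem_vram_unpin(gbo);
 *              return (int)offset;
 *      }
 *      write_reg(scanout_reg, (u64)offset);
 */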
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
                                   unsigned long pl_flag)
{
        int i, ret;
        struct ttm_operation_ctx ctx = { false, false };

        if (gbo->pin_count)
                goto out;

        if (pl_flag)
                drm_gem_vram_placement(gbo, pl_flag);

        for (i = 0; i < gbo->placement.num_placement; ++i)
                gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
        if (ret < 0)
                return ret;

out:
        ++gbo->pin_count;

        return 0;
}
/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo:        the GEM VRAM object
 * @pl_flag:    a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Small buffer objects, such as cursor images, can lead to memory
 * fragmentation if they are pinned in the middle of video RAM. This
 * is especially a problem on devices with only a small amount of
 * video RAM. Fragmentation can prevent the primary framebuffer from
 * fitting in, even though there's enough memory overall. The modifier
 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
 * at the high end of the memory region to avoid fragmentation.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;

        ret = drm_gem_vram_pin_locked(gbo, pl_flag);
        ttm_bo_unreserve(&gbo->bo);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);
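
/*
 * For example, a driver might pin a small cursor image at the high end
 * of VRAM to keep the middle of the region free for the framebuffer
 * (sketch, assuming gbo holds the cursor buffer):
 *
 *      ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
 *                                  DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
 *      if (ret)
 *              return ret;
 *      ...
 *      drm_gem_vram_unpin(gbo);
 */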
static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
        int i, ret;
        struct ttm_operation_ctx ctx = { false, false };

        if (WARN_ON_ONCE(!gbo->pin_count))
                return 0;

        --gbo->pin_count;
        if (gbo->pin_count)
                return 0;

        for (i = 0; i < gbo->placement.num_placement; ++i)
                gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
        if (ret < 0)
                return ret;

        return 0;
}

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo:        the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;

        ret = drm_gem_vram_unpin_locked(gbo);
        ttm_bo_unreserve(&gbo->bo);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
                                      bool map, bool *is_iomem)
{
        int ret;
        struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

        if (gbo->kmap_use_count > 0)
                goto out;

        if (kmap->virtual || !map)
                goto out;

        ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
        if (ret)
                return ERR_PTR(ret);

out:
        if (!kmap->virtual) {
                if (is_iomem)
                        *is_iomem = false;
                return NULL; /* not mapped; don't increment ref */
        }
        ++gbo->kmap_use_count;
        if (is_iomem)
                return ttm_kmap_obj_virtual(kmap, is_iomem);
        return kmap->virtual;
}
/**
 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
 * @gbo:        the GEM VRAM object
 * @map:        establish a mapping if necessary
 * @is_iomem:   returns true if the mapped memory is I/O memory, or false \
        otherwise; can be NULL
 *
 * This function maps the buffer object into the kernel's address space
 * or returns the current mapping. If the parameter map is false, the
 * function only queries the current mapping, but does not establish a
 * new one.
 *
 * Returns:
 * The buffer's virtual address if mapped, or
 * NULL if not mapped, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
                        bool *is_iomem)
{
        int ret;
        void *virtual;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ERR_PTR(ret);
        virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
        ttm_bo_unreserve(&gbo->bo);

        return virtual;
}
EXPORT_SYMBOL(drm_gem_vram_kmap);
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
{
        if (WARN_ON_ONCE(!gbo->kmap_use_count))
                return;
        if (--gbo->kmap_use_count > 0)
                return;

        /*
         * Permanently mapping and unmapping buffers adds overhead from
         * updating the page tables and creates debugging output. Therefore,
         * we delay the actual unmap operation until the BO gets evicted
         * from memory. See drm_gem_vram_bo_driver_move_notify().
         */
}

/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo:        the GEM VRAM object
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
        if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
                return;
        drm_gem_vram_kunmap_locked(gbo);
        ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
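
/*
 * Calls to drm_gem_vram_kmap() and drm_gem_vram_kunmap() are expected
 * to come in balanced pairs. A sketch (error handling shortened):
 *
 *      bool is_iomem;
 *      void *vaddr = drm_gem_vram_kmap(gbo, true, &is_iomem);
 *
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      if (vaddr) {
 *              ... access the buffer ...
 *              drm_gem_vram_kunmap(gbo);
 *      }
 */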

/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
 *                       space
 * @gbo:        The GEM VRAM object to map
 *
 * The vmap function pins a GEM VRAM object to its current location, either
 * system or video memory, and maps its buffer into kernel address space.
 * As pinned objects cannot be relocated, you should avoid pinning objects
 * permanently. Call drm_gem_vram_vunmap() with the returned address to
 * unmap and unpin the GEM VRAM object.
 *
 * If you have special requirements for the pinning or mapping operations,
 * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
 *
 * Returns:
 * The buffer's virtual address on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
{
        int ret;
        void *base;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ERR_PTR(ret);

        ret = drm_gem_vram_pin_locked(gbo, 0);
        if (ret)
                goto err_ttm_bo_unreserve;
        base = drm_gem_vram_kmap_locked(gbo, true, NULL);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
                goto err_drm_gem_vram_unpin_locked;
        }

        ttm_bo_unreserve(&gbo->bo);

        return base;

err_drm_gem_vram_unpin_locked:
        drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
        ttm_bo_unreserve(&gbo->bo);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_vmap);

/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo:        The GEM VRAM object to unmap
 * @vaddr:      The mapping's base address as returned by drm_gem_vram_vmap()
 *
 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
 * the documentation for drm_gem_vram_vmap() for more information.
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
        if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
                return;

        drm_gem_vram_kunmap_locked(gbo);
        drm_gem_vram_unpin_locked(gbo);

        ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
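
/*
 * The pair is meant to bracket short CPU accesses, such as updating the
 * buffer from a shadow copy (sketch; shadow and len are hypothetical,
 * and a mapping in VRAM may be I/O memory):
 *
 *      void *vaddr = drm_gem_vram_vmap(gbo);
 *
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      memcpy(vaddr, shadow, len);
 *      drm_gem_vram_vunmap(gbo, vaddr);
 */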

/**
 * drm_gem_vram_fill_create_dumb() - \
        Helper for implementing &struct drm_driver.dumb_create
 * @file:               the DRM file
 * @dev:                the DRM device
 * @pg_align:           the buffer's alignment in multiples of the page size
 * @pitch_align:        the scanline's alignment in powers of 2
 * @args:               the arguments as provided to \
                        &struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
 * parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
                                  struct drm_device *dev,
                                  unsigned long pg_align,
                                  unsigned long pitch_align,
                                  struct drm_mode_create_dumb *args)
{
        size_t pitch, size;
        struct drm_gem_vram_object *gbo;
        int ret;
        u32 handle;

        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        if (pitch_align) {
                if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
                        return -EINVAL;
                pitch = ALIGN(pitch, pitch_align);
        }
        size = pitch * args->height;

        size = roundup(size, PAGE_SIZE);
        if (!size)
                return -EINVAL;

        gbo = drm_gem_vram_create(dev, size, pg_align);
        if (IS_ERR(gbo))
                return PTR_ERR(gbo);

        ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
        if (ret)
                goto err_drm_gem_object_put_unlocked;

        drm_gem_object_put_unlocked(&gbo->bo.base);

        args->pitch = pitch;
        args->size = size;
        args->handle = handle;

        return 0;

err_drm_gem_object_put_unlocked:
        drm_gem_object_put_unlocked(&gbo->bo.base);
        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
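
/*
 * A driver with hardware scanline constraints might wrap the helper like
 * this (sketch; the driver name foo and the 64-byte pitch alignment are
 * made-up values):
 *
 *      static int foo_dumb_create(struct drm_file *file,
 *                                 struct drm_device *dev,
 *                                 struct drm_mode_create_dumb *args)
 *      {
 *              return drm_gem_vram_fill_create_dumb(file, dev, 0, 64, args);
 *      }
 */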

/*
 * Helpers for struct ttm_bo_driver
 */

static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
        return (bo->destroy == ttm_buffer_object_destroy);
}

static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
                                               struct ttm_placement *pl)
{
        drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
        *pl = gbo->placement;
}

static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
                                               bool evict,
                                               struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

        if (WARN_ON_ONCE(gbo->kmap_use_count))
                return;

        if (!kmap->virtual)
                return;
        ttm_bo_kunmap(kmap);
        kmap->virtual = NULL;
}

/*
 * Helpers for struct drm_gem_object_funcs
 */

/**
 * drm_gem_vram_object_free() - \
        Implements &struct drm_gem_object_funcs.free
 * @gem:        GEM object. Refers to &struct drm_gem_vram_object.gem
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        drm_gem_vram_put(gbo);
}

/*
 * Helpers for dumb buffers
 */

/**
 * drm_gem_vram_driver_dumb_create() - \
        Implements &struct drm_driver.dumb_create
 * @file:               the DRM file
 * @dev:                the DRM device
 * @args:               the arguments as provided to \
                        &struct drm_driver.dumb_create
 *
 * This function requires the driver to use &struct drm_device.vram_mm for its
 * instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
                                    struct drm_device *dev,
                                    struct drm_mode_create_dumb *args)
{
        if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
                return -EINVAL;

        return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);

/**
 * drm_gem_vram_driver_dumb_mmap_offset() - \
        Implements &struct drm_driver.dumb_mmap_offset
 * @file:       DRM file pointer.
 * @dev:        DRM device.
 * @handle:     GEM handle
 * @offset:     Returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
                                         struct drm_device *dev,
                                         uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *gem;
        struct drm_gem_vram_object *gbo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem)
                return -ENOENT;

        gbo = drm_gem_vram_of_gem(gem);
        *offset = drm_gem_vram_mmap_offset(gbo);

        drm_gem_object_put_unlocked(gem);

        return 0;
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
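
/*
 * Both helpers plug directly into &struct drm_driver (sketch; the
 * structure foo_driver is hypothetical):
 *
 *      static struct drm_driver foo_driver = {
 *              ...
 *              .dumb_create = drm_gem_vram_driver_dumb_create,
 *              .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
 *              ...
 *      };
 */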

/*
 * Helpers for struct drm_plane_helper_funcs
 */

/**
 * drm_gem_vram_plane_helper_prepare_fb() - \
 *      Implements &struct drm_plane_helper_funcs.prepare_fb
 * @plane:      a DRM plane
 * @new_state:  the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
                                     struct drm_plane_state *new_state)
{
        size_t i;
        struct drm_gem_vram_object *gbo;
        int ret;

        if (!new_state->fb)
                return 0;

        for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
                if (!new_state->fb->obj[i])
                        continue;
                gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
                ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
                if (ret)
                        goto err_drm_gem_vram_unpin;
        }

        return 0;

err_drm_gem_vram_unpin:
        while (i) {
                --i;
                gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
                drm_gem_vram_unpin(gbo);
        }
        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);

/**
 * drm_gem_vram_plane_helper_cleanup_fb() - \
 *      Implements &struct drm_plane_helper_funcs.cleanup_fb
 * @plane:      a DRM plane
 * @old_state:  the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_plane_helper_prepare_fb().
 */
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
{
        size_t i;
        struct drm_gem_vram_object *gbo;

        if (!old_state->fb)
                return;

        for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
                if (!old_state->fb->obj[i])
                        continue;
                gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
                drm_gem_vram_unpin(gbo);
        }
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
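
/*
 * A driver can use both callbacks as its plane helpers (sketch;
 * foo_plane_helper_funcs is a hypothetical name):
 *
 *      static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *              .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
 *              .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
 *              ...
 *      };
 */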

/*
 * Helpers for struct drm_simple_display_pipe_funcs
 */

/**
 * drm_gem_vram_simple_display_pipe_prepare_fb() - \
 *      Implements &struct drm_simple_display_pipe_funcs.prepare_fb
 * @pipe:       a simple display pipe
 * @new_state:  the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_simple_display_pipe_prepare_fb(
        struct drm_simple_display_pipe *pipe,
        struct drm_plane_state *new_state)
{
        return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);

/**
 * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
 *      Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
 * @pipe:       a simple display pipe
 * @old_state:  the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_simple_display_pipe_prepare_fb().
 */
void drm_gem_vram_simple_display_pipe_cleanup_fb(
        struct drm_simple_display_pipe *pipe,
        struct drm_plane_state *old_state)
{
        drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);

/*
 * PRIME helpers
 */

/**
 * drm_gem_vram_object_pin() - \
        Implements &struct drm_gem_object_funcs.pin
 * @gem:        The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        /* Fbdev console emulation is the use case of these PRIME
         * helpers. This may involve updating a hardware buffer from
         * a shadow FB. We pin the buffer to its current location
         * (either video RAM or system memory) to prevent it from
         * being relocated during the update operation. If you require
         * the buffer to be pinned to VRAM, implement a callback that
         * sets the flags accordingly.
         */
        return drm_gem_vram_pin(gbo, 0);
}

/**
 * drm_gem_vram_object_unpin() - \
        Implements &struct drm_gem_object_funcs.unpin
 * @gem:        The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        drm_gem_vram_unpin(gbo);
}

/**
 * drm_gem_vram_object_vmap() - \
        Implements &struct drm_gem_object_funcs.vmap
 * @gem:        The GEM object to map
 *
 * Returns:
 * The buffer's virtual address on success, or
 * NULL otherwise.
 */
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
        void *base;

        base = drm_gem_vram_vmap(gbo);
        if (IS_ERR(base))
                return NULL;
        return base;
}

/**
 * drm_gem_vram_object_vunmap() - \
        Implements &struct drm_gem_object_funcs.vunmap
 * @gem:        The GEM object to unmap
 * @vaddr:      The mapping's base address
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
                                       void *vaddr)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        drm_gem_vram_vunmap(gbo, vaddr);
}

/*
 * GEM object funcs
 */

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
        .free   = drm_gem_vram_object_free,
        .pin    = drm_gem_vram_object_pin,
        .unpin  = drm_gem_vram_object_unpin,
        .vmap   = drm_gem_vram_object_vmap,
        .vunmap = drm_gem_vram_object_vunmap,
        .mmap   = drm_gem_ttm_mmap,
        .print_info = drm_gem_ttm_print_info,
};

/*
 * VRAM memory manager
 */

/*
 * TTM TT
 */

static void backend_func_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func backend_func = {
        .destroy = backend_func_destroy
};

/*
 * TTM BO device
 */

static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
                                              uint32_t page_flags)
{
        struct ttm_tt *tt;
        int ret;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        tt->func = &backend_func;

        ret = ttm_tt_init(tt, bo, page_flags);
        if (ret < 0)
                goto err_ttm_tt_init;

        return tt;

err_ttm_tt_init:
        kfree(tt);
        return NULL;
}

static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                   struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
                                  struct ttm_placement *placement)
{
        struct drm_gem_vram_object *gbo;

        /* TTM may pass BOs that are not GEM VRAM BOs. */
        if (!drm_is_gem_vram(bo))
                return;

        gbo = drm_gem_vram_of_bo(bo);

        drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}

static void bo_driver_move_notify(struct ttm_buffer_object *bo,
                                  bool evict,
                                  struct ttm_mem_reg *new_mem)
{
        struct drm_gem_vram_object *gbo;

        /* TTM may pass BOs that are not GEM VRAM BOs. */
        if (!drm_is_gem_vram(bo))
                return;

        gbo = drm_gem_vram_of_bo(bo);

        drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
}

static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
                                    struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
        struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;

        mem->bus.addr = NULL;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:     /* nothing to do */
                mem->bus.offset = 0;
                mem->bus.base = 0;
                mem->bus.is_iomem = false;
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = vmm->vram_base;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
                                  struct ttm_mem_reg *mem)
{ }

static struct ttm_bo_driver bo_driver = {
        .ttm_tt_create = bo_driver_ttm_tt_create,
        .ttm_tt_populate = ttm_pool_populate,
        .ttm_tt_unpopulate = ttm_pool_unpopulate,
        .init_mem_type = bo_driver_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = bo_driver_evict_flags,
        .move_notify = bo_driver_move_notify,
        .io_mem_reserve = bo_driver_io_mem_reserve,
        .io_mem_free = bo_driver_io_mem_free,
};

/*
 * struct drm_vram_mm
 */

#if defined(CONFIG_DEBUG_FS)
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
        struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
        struct drm_printer p = drm_seq_file_printer(m);

        spin_lock(&ttm_bo_glob.lru_lock);
        drm_mm_print(mm, &p);
        spin_unlock(&ttm_bo_glob.lru_lock);
        return 0;
}

static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
        { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
#endif

/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 *
 * @minor: drm minor device.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
        int ret = 0;

#if defined(CONFIG_DEBUG_FS)
        ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
                                       ARRAY_SIZE(drm_vram_mm_debugfs_list),
                                       minor->debugfs_root, minor);
#endif
        return ret;
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
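
/*
 * Drivers register the file by pointing &struct drm_driver.debugfs_init
 * at this helper (sketch; foo_driver is a hypothetical structure):
 *
 *      static struct drm_driver foo_driver = {
 *              ...
 *              .debugfs_init = drm_vram_mm_debugfs_init,
 *              ...
 *      };
 *
 * The VRAM allocation list then appears as the file vram-mm in the
 * minor's debugfs directory.
 */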

static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
                            uint64_t vram_base, size_t vram_size)
{
        int ret;

        vmm->vram_base = vram_base;
        vmm->vram_size = vram_size;

        ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
                                 dev->anon_inode->i_mapping,
                                 dev->vma_offset_manager,
                                 true);
        if (ret)
                return ret;

        ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
        if (ret)
                return ret;

        return 0;
}

static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
        ttm_bo_device_release(&vmm->bdev);
}

/*
 * Helpers for integration with struct drm_device
 */

/**
 * drm_vram_helper_alloc_mm - Allocates a device's instance of \
        &struct drm_vram_mm
 * @dev:        the DRM device
 * @vram_base:  the base address of the video memory
 * @vram_size:  the size of the video memory in bytes
 *
 * Returns:
 * The new instance of &struct drm_vram_mm on success, or
 * an ERR_PTR()-encoded errno code otherwise.
 */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
        struct drm_device *dev, uint64_t vram_base, size_t vram_size)
{
        int ret;

        if (WARN_ON(dev->vram_mm))
                return dev->vram_mm;

        dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
        if (!dev->vram_mm)
                return ERR_PTR(-ENOMEM);

        ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
        if (ret)
                goto err_kfree;

        return dev->vram_mm;

err_kfree:
        kfree(dev->vram_mm);
        dev->vram_mm = NULL;
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);

/**
 * drm_vram_helper_release_mm - Releases a device's instance of \
        &struct drm_vram_mm
 * @dev:        the DRM device
 */
void drm_vram_helper_release_mm(struct drm_device *dev)
{
        if (!dev->vram_mm)
                return;

        drm_vram_mm_cleanup(dev->vram_mm);
        kfree(dev->vram_mm);
        dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);

/*
 * Mode-config helpers
 */

static enum drm_mode_status
drm_vram_helper_mode_valid_internal(struct drm_device *dev,
                                    const struct drm_display_mode *mode,
                                    unsigned long max_bpp)
{
        struct drm_vram_mm *vmm = dev->vram_mm;
        unsigned long fbsize, fbpages, max_fbpages;

        if (WARN_ON(!dev->vram_mm))
                return MODE_BAD;

        max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;

        fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
        fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);

        if (fbpages > max_fbpages)
                return MODE_MEM;

        return MODE_OK;
}

/**
 * drm_vram_helper_mode_valid - Tests if a display mode's
 *      framebuffer fits into the available video memory.
 * @dev:        the DRM device
 * @mode:       the mode to test
 *
 * This function tests if enough video memory is available for using the
 * specified display mode. Atomic modesetting requires importing the
 * designated framebuffer into video memory before evicting the active
 * one. Hence, any framebuffer may consume at most half of the available
 * VRAM. Display modes that require a larger framebuffer cannot be used,
 * even if the CRTC does support them. Each framebuffer is assumed to
 * have 32-bit color depth.
 *
 * Note:
 * The function can only test if the display mode is supported in
 * general. If there are too many framebuffers pinned to video memory,
 * a display mode may still not be usable in practice. The color depth of
 * 32-bit fits all current use cases. A more flexible test can be added
 * when necessary.
 *
 * Returns:
 * MODE_OK if the display mode is supported, or an error code of type
 * enum drm_mode_status otherwise.
 */
enum drm_mode_status
drm_vram_helper_mode_valid(struct drm_device *dev,
                           const struct drm_display_mode *mode)
{
        static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */

        return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
}
EXPORT_SYMBOL(drm_vram_helper_mode_valid);
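
/*
 * For example, 1920x1080 at 32-bit color needs 1920 * 1080 * 4 = 8294400
 * bytes (just under 8 MiB), so the mode passes only on devices with at
 * least twice that amount of VRAM (just under 16 MiB). A connector helper
 * can forward to this function (sketch; foo_mode_valid is hypothetical):
 *
 *      static enum drm_mode_status
 *      foo_mode_valid(struct drm_connector *connector,
 *                     struct drm_display_mode *mode)
 *      {
 *              return drm_vram_helper_mode_valid(connector->dev, mode);
 *      }
 */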