#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__
/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>

#include <drm/drm_vma_manager.h>

struct iosys_map;
struct drm_gem_object;
2023-05-24 18:59:35 +03:00
/**
* enum drm_gem_object_status - bitmask of object state for fdinfo reporting
* @ DRM_GEM_OBJECT_RESIDENT : object is resident in memory ( ie . not unpinned )
* @ DRM_GEM_OBJECT_PURGEABLE : object marked as purgeable by userspace
*
* Bitmask of status used for fdinfo memory stats , see & drm_gem_object_funcs . status
* and drm_show_fdinfo ( ) . Note that an object can DRM_GEM_OBJECT_PURGEABLE if
* it still active or not resident , in which case drm_show_fdinfo ( ) will not
* account for it as purgeable . So drivers do not need to check if the buffer
* is idle and resident to return this bit . ( Ie . userspace can mark a buffer
* as purgeable even while it is still busy on the GPU . . it does not _actually_
* become puregeable until it becomes idle . The status gem object func does
* not need to consider this . )
*/
enum drm_gem_object_status {
DRM_GEM_OBJECT_RESIDENT = BIT ( 0 ) ,
DRM_GEM_OBJECT_PURGEABLE = BIT ( 1 ) ,
} ;
2018-11-10 17:56:45 +03:00
/**
* struct drm_gem_object_funcs - GEM object functions
*/
struct drm_gem_object_funcs {
/**
* @ free :
*
* Deconstructor for drm_gem_objects .
*
* This callback is mandatory .
*/
void ( * free ) ( struct drm_gem_object * obj ) ;
/**
* @ open :
*
* Called upon GEM handle creation .
*
* This callback is optional .
*/
int ( * open ) ( struct drm_gem_object * obj , struct drm_file * file ) ;
/**
* @ close :
*
* Called upon GEM handle release .
*
* This callback is optional .
*/
void ( * close ) ( struct drm_gem_object * obj , struct drm_file * file ) ;
/**
* @ print_info :
*
* If driver subclasses struct & drm_gem_object , it can implement this
* optional hook for printing additional driver specific info .
*
* drm_printf_indent ( ) should be used in the callback passing it the
* indent argument .
*
* This callback is called from drm_gem_print_info ( ) .
*
* This callback is optional .
*/
void ( * print_info ) ( struct drm_printer * p , unsigned int indent ,
const struct drm_gem_object * obj ) ;
/**
* @ export :
*
* Export backing buffer as a & dma_buf .
* If this is not set drm_gem_prime_export ( ) is used .
*
* This callback is optional .
*/
struct dma_buf * ( * export ) ( struct drm_gem_object * obj , int flags ) ;
/**
* @ pin :
*
2019-06-20 15:46:15 +03:00
* Pin backing buffer in memory . Used by the drm_gem_map_attach ( ) helper .
2018-11-10 17:56:45 +03:00
*
* This callback is optional .
*/
int ( * pin ) ( struct drm_gem_object * obj ) ;
/**
* @ unpin :
*
2019-06-20 15:46:15 +03:00
* Unpin backing buffer . Used by the drm_gem_map_detach ( ) helper .
2018-11-10 17:56:45 +03:00
*
* This callback is optional .
*/
void ( * unpin ) ( struct drm_gem_object * obj ) ;
/**
* @ get_sg_table :
*
* Returns a Scatter - Gather table representation of the buffer .
2019-06-20 15:46:15 +03:00
* Used when exporting a buffer by the drm_gem_map_dma_buf ( ) helper .
* Releasing is done by calling dma_unmap_sg_attrs ( ) and sg_free_table ( )
* in drm_gem_unmap_buf ( ) , therefore these helpers and this callback
* here cannot be used for sg tables pointing at driver private memory
* ranges .
2018-11-10 17:56:45 +03:00
*
2019-06-20 15:46:15 +03:00
* See also drm_prime_pages_to_sg ( ) .
2018-11-10 17:56:45 +03:00
*/
struct sg_table * ( * get_sg_table ) ( struct drm_gem_object * obj ) ;
/**
* @ vmap :
*
2019-06-20 15:46:15 +03:00
* Returns a virtual address for the buffer . Used by the
* drm_gem_dmabuf_vmap ( ) helper .
2018-11-10 17:56:45 +03:00
*
* This callback is optional .
*/
2022-02-04 20:05:41 +03:00
int ( * vmap ) ( struct drm_gem_object * obj , struct iosys_map * map ) ;
2018-11-10 17:56:45 +03:00
/**
* @ vunmap :
*
2020-07-15 08:23:45 +03:00
* Releases the address previously returned by @ vmap . Used by the
2019-06-20 15:46:15 +03:00
* drm_gem_dmabuf_vunmap ( ) helper .
2018-11-10 17:56:45 +03:00
*
* This callback is optional .
*/
2022-02-04 20:05:41 +03:00
void ( * vunmap ) ( struct drm_gem_object * obj , struct iosys_map * map ) ;
2018-11-10 17:56:45 +03:00
2019-10-16 14:51:53 +03:00
/**
* @ mmap :
*
* Handle mmap ( ) of the gem object , setup vma accordingly .
*
* This callback is optional .
*
2020-07-15 08:23:45 +03:00
* The callback is used by both drm_gem_mmap_obj ( ) and
2019-10-16 14:51:53 +03:00
* drm_gem_prime_mmap ( ) . When @ mmap is present @ vm_ops is not
2019-11-27 12:25:22 +03:00
* used , the @ mmap callback must set vma - > vm_ops instead .
2019-10-16 14:51:53 +03:00
*/
int ( * mmap ) ( struct drm_gem_object * obj , struct vm_area_struct * vma ) ;
2022-11-16 17:07:50 +03:00
/**
* @ evict :
*
* Evicts gem object out from memory . Used by the drm_gem_object_evict ( )
* helper . Returns 0 on success , - errno otherwise .
*
* This callback is optional .
*/
int ( * evict ) ( struct drm_gem_object * obj ) ;
2023-05-24 18:59:35 +03:00
/**
* @ status :
*
* The optional status callback can return additional object state
* which determines which stats the object is counted against . The
* callback is called under table_lock . Racing against object status
* change is " harmless " , and the callback can expect to not race
* against object destruction .
*
* Called by drm_show_memory_stats ( ) .
*/
enum drm_gem_object_status ( * status ) ( struct drm_gem_object * obj ) ;
2023-09-29 21:14:30 +03:00
/**
* @ rss :
*
* Return resident size of the object in physical memory .
*
* Called by drm_show_memory_stats ( ) .
*/
size_t ( * rss ) ( struct drm_gem_object * obj ) ;
2018-11-10 17:56:45 +03:00
/**
* @ vm_ops :
*
* Virtual memory operations used with mmap .
*
* This is optional but necessary for mmap support .
*/
const struct vm_operations_struct * vm_ops ;
} ;
2022-08-02 18:51:42 +03:00
/**
* struct drm_gem_lru - A simple LRU helper
*
* A helper for tracking GEM objects in a given state , to aid in
* driver ' s shrinker implementation . Tracks the count of pages
* for lockless & shrinker . count_objects , and provides
* & drm_gem_lru_scan for driver ' s & shrinker . scan_objects
* implementation .
*/
struct drm_gem_lru {
/**
* @ lock :
*
* Lock protecting movement of GEM objects between LRUs . All
* LRUs that the object can move between should be protected
* by the same lock .
*/
struct mutex * lock ;
/**
* @ count :
*
* The total number of backing pages of the GEM objects in
* this LRU .
*/
long count ;
/**
* @ list :
*
* The LRU list .
*/
struct list_head list ;
} ;
2014-09-23 17:46:53 +04:00
/**
2015-10-22 20:11:27 +03:00
* struct drm_gem_object - GEM buffer object
*
* This structure defines the generic parts for GEM buffer objects , which are
* mostly around handling mmap and userspace handles .
*
* Buffer objects are often abbreviated to BO .
2014-09-23 17:46:53 +04:00
*/
struct drm_gem_object {
2015-10-22 20:11:27 +03:00
/**
* @ refcount :
*
* Reference count of this object
*
2020-05-15 12:50:51 +03:00
* Please use drm_gem_object_get ( ) to acquire and drm_gem_object_put_locked ( )
2020-05-15 12:50:53 +03:00
* or drm_gem_object_put ( ) to release a reference to a GEM
2017-02-28 17:46:41 +03:00
* buffer object .
2015-10-22 20:11:27 +03:00
*/
2014-09-23 17:46:53 +04:00
struct kref refcount ;
/**
2015-10-22 20:11:27 +03:00
* @ handle_count :
*
* This is the GEM file_priv handle count of this object .
2014-09-23 17:46:53 +04:00
*
* Each handle also holds a reference . Note that when the handle_count
* drops to 0 any global names ( e . g . the id in the flink namespace ) will
* be cleared .
*
2017-01-25 09:26:46 +03:00
* Protected by & drm_device . object_name_lock .
2015-10-22 20:11:27 +03:00
*/
2014-09-23 17:46:53 +04:00
unsigned handle_count ;
2015-10-22 20:11:27 +03:00
/**
* @ dev : DRM dev this object belongs to .
*/
2014-09-23 17:46:53 +04:00
struct drm_device * dev ;
2015-10-22 20:11:27 +03:00
/**
* @ filp :
*
* SHMEM file node used as backing storage for swappable buffer objects .
* GEM also supports driver private objects with driver - specific backing
2022-08-02 03:04:03 +03:00
* storage ( contiguous DMA memory , special reserved blocks ) . In this
2015-10-22 20:11:27 +03:00
* case @ filp is NULL .
*/
2014-09-23 17:46:53 +04:00
struct file * filp ;
2015-10-22 20:11:27 +03:00
/**
* @ vma_node :
*
* Mapping info for this object to support mmap . Drivers are supposed to
* allocate the mmap offset using drm_gem_create_mmap_offset ( ) . The
* offset itself can be retrieved using drm_vma_node_offset_addr ( ) .
*
* Memory mapping itself is handled by drm_gem_mmap ( ) , which also checks
* that userspace is allowed to access the object .
*/
2014-09-23 17:46:53 +04:00
struct drm_vma_offset_node vma_node ;
/**
2015-10-22 20:11:27 +03:00
* @ size :
*
2014-09-23 17:46:53 +04:00
* Size of the object , in bytes . Immutable over the object ' s
* lifetime .
*/
size_t size ;
/**
2015-10-22 20:11:27 +03:00
* @ name :
*
2014-09-23 17:46:53 +04:00
* Global name for this object , starts at 1. 0 means unnamed .
2017-01-25 09:26:46 +03:00
* Access is covered by & drm_device . object_name_lock . This is used by
* the GEM_FLINK and GEM_OPEN ioctls .
2014-09-23 17:46:53 +04:00
*/
int name ;
/**
2015-10-22 20:11:27 +03:00
* @ dma_buf :
*
* dma - buf associated with this GEM object .
2014-09-23 17:46:53 +04:00
*
* Pointer to the dma - buf associated with this gem object ( either
* through importing or exporting ) . We break the resulting reference
* loop when the last gem handle for this object is released .
*
2017-01-25 09:26:46 +03:00
* Protected by & drm_device . object_name_lock .
2014-09-23 17:46:53 +04:00
*/
struct dma_buf * dma_buf ;
/**
2015-10-22 20:11:27 +03:00
* @ import_attach :
*
* dma - buf attachment backing this object .
2014-09-23 17:46:53 +04:00
*
* Any foreign dma_buf imported as a gem object has this set to the
* attachment point for the device . This is invariant over the lifetime
* of a gem object .
*
2020-09-23 13:21:59 +03:00
* The & drm_gem_object_funcs . free callback is responsible for
2020-05-15 12:50:49 +03:00
* cleaning up the dma_buf attachment and references acquired at import
* time .
2014-09-23 17:46:53 +04:00
*
* Note that the drm gem / prime core does not depend upon drivers setting
* this field any more . So for drivers where this doesn ' t make sense
* ( e . g . virtual devices or a displaylink behind an usb bus ) they can
* simply leave it as NULL .
*/
struct dma_buf_attachment * import_attach ;
2018-11-10 17:56:45 +03:00
2019-02-02 18:41:54 +03:00
/**
* @ resv :
*
* Pointer to reservation object associated with the this GEM object .
*
* Normally ( @ resv = = & @ _resv ) except for imported GEM objects .
*/
2019-08-11 11:06:32 +03:00
struct dma_resv * resv ;
2019-02-02 18:41:54 +03:00
/**
* @ _resv :
*
* A reservation object for this GEM object .
*
* This is unused for imported GEM objects .
*/
2019-08-11 11:06:32 +03:00
struct dma_resv _resv ;
2019-02-02 18:41:54 +03:00
2023-07-20 03:14:22 +03:00
/**
* @ gpuva :
*
* Provides the list of GPU VAs attached to this GEM object .
*
* Drivers should lock list accesses with the GEMs & dma_resv lock
* ( & drm_gem_object . resv ) or a custom lock if one is provided .
*/
struct {
struct list_head list ;
# ifdef CONFIG_LOCKDEP
struct lockdep_map * lock_dep_map ;
# endif
} gpuva ;
2018-11-10 17:56:45 +03:00
/**
* @ funcs :
*
* Optional GEM object functions . If this is set , it will be used instead of the
* corresponding & drm_driver GEM callbacks .
*
* New drivers should use this .
*
*/
const struct drm_gem_object_funcs * funcs ;
2022-08-02 18:51:42 +03:00
/**
* @ lru_node :
*
* List node in a & drm_gem_lru .
*/
struct list_head lru_node ;
/**
* @ lru :
*
* The current LRU list that the GEM object is on .
*/
struct drm_gem_lru * lru ;
2014-09-23 17:46:53 +04:00
} ;
/**
 * DRM_GEM_FOPS - Default drm GEM file operations
 *
 * This macro provides a shorthand for setting the GEM file ops in the
 * &file_operations structure.  If all you need are the default ops, use
 * DEFINE_DRM_GEM_FOPS instead.
 */
#define DRM_GEM_FOPS \
		.open		= drm_open,\
		.release	= drm_release,\
		.unlocked_ioctl	= drm_ioctl,\
		.compat_ioctl	= drm_compat_ioctl,\
		.poll		= drm_poll,\
		.read		= drm_read,\
		.llseek		= noop_llseek,\
		.mmap		= drm_gem_mmap
/**
 * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
 * @name: name for the generated structure
 *
 * This macro autogenerates a suitable &struct file_operations for GEM based
 * drivers, which can be assigned to &drm_driver.fops. Note that this structure
 * cannot be shared between drivers, because it contains a reference to the
 * current module using THIS_MODULE.
 *
 * Note that the declaration is already marked as static - if you need a
 * non-static version of this you're probably doing it wrong and will break the
 * THIS_MODULE reference by accident.
 */
#define DEFINE_DRM_GEM_FOPS(name) \
	static const struct file_operations name = {\
		.owner		= THIS_MODULE,\
		DRM_GEM_FOPS,\
	}
/* Core GEM object lifecycle and mmap entry points. */
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
2015-10-22 20:11:27 +03:00
/**
2017-02-28 17:46:41 +03:00
* drm_gem_object_get - acquire a GEM buffer object reference
2015-10-22 20:11:27 +03:00
* @ obj : GEM buffer object
*
2017-02-28 17:46:41 +03:00
* This function acquires an additional reference to @ obj . It is illegal to
* call this without already holding a reference . No locks required .
2015-10-22 20:11:27 +03:00
*/
2017-02-28 17:46:41 +03:00
static inline void drm_gem_object_get ( struct drm_gem_object * obj )
2014-09-23 17:46:53 +04:00
{
kref_get ( & obj - > refcount ) ;
}
2020-05-20 17:23:47 +03:00
__attribute__ ( ( nonnull ) )
static inline void
__drm_gem_object_put ( struct drm_gem_object * obj )
{
kref_put ( & obj - > refcount , drm_gem_object_free ) ;
}
/**
 * drm_gem_object_put - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Passing NULL is a no-op.
 */
static inline void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj)
		__drm_gem_object_put(obj);
}
int drm_gem_handle_create ( struct drm_file * file_priv ,
struct drm_gem_object * obj ,
u32 * handlep ) ;
int drm_gem_handle_delete ( struct drm_file * filp , u32 handle ) ;
void drm_gem_free_mmap_offset ( struct drm_gem_object * obj ) ;
int drm_gem_create_mmap_offset ( struct drm_gem_object * obj ) ;
int drm_gem_create_mmap_offset_size ( struct drm_gem_object * obj , size_t size ) ;
struct page * * drm_gem_get_pages ( struct drm_gem_object * obj ) ;
void drm_gem_put_pages ( struct drm_gem_object * obj , struct page * * pages ,
bool dirty , bool accessed ) ;
2024-02-27 13:14:57 +03:00
void drm_gem_lock ( struct drm_gem_object * obj ) ;
void drm_gem_unlock ( struct drm_gem_object * obj ) ;
2022-10-17 20:22:11 +03:00
int drm_gem_vmap_unlocked ( struct drm_gem_object * obj , struct iosys_map * map ) ;
void drm_gem_vunmap_unlocked ( struct drm_gem_object * obj , struct iosys_map * map ) ;
2019-03-08 23:26:02 +03:00
int drm_gem_objects_lookup ( struct drm_file * filp , void __user * bo_handles ,
int count , struct drm_gem_object * * * objs_out ) ;
2016-05-09 13:04:54 +03:00
struct drm_gem_object * drm_gem_object_lookup ( struct drm_file * filp , u32 handle ) ;
2019-08-11 11:06:32 +03:00
long drm_gem_dma_resv_wait ( struct drm_file * filep , u32 handle ,
2019-02-02 18:41:54 +03:00
bool wait_all , unsigned long timeout ) ;
2019-03-08 19:17:13 +03:00
int drm_gem_lock_reservations ( struct drm_gem_object * * objs , int count ,
struct ww_acquire_ctx * acquire_ctx ) ;
void drm_gem_unlock_reservations ( struct drm_gem_object * * objs , int count ,
struct ww_acquire_ctx * acquire_ctx ) ;
2019-08-07 17:52:47 +03:00
int drm_gem_dumb_map_offset ( struct drm_file * file , struct drm_device * dev ,
u32 handle , u64 * offset ) ;
2014-09-23 17:46:53 +04:00
2022-08-02 18:51:42 +03:00
void drm_gem_lru_init ( struct drm_gem_lru * lru , struct mutex * lock ) ;
void drm_gem_lru_remove ( struct drm_gem_object * obj ) ;
2023-03-20 17:43:28 +03:00
void drm_gem_lru_move_tail_locked ( struct drm_gem_lru * lru , struct drm_gem_object * obj ) ;
2022-08-02 18:51:42 +03:00
void drm_gem_lru_move_tail ( struct drm_gem_lru * lru , struct drm_gem_object * obj ) ;
2022-11-04 19:04:59 +03:00
unsigned long drm_gem_lru_scan ( struct drm_gem_lru * lru ,
unsigned int nr_to_scan ,
unsigned long * remaining ,
2022-08-02 18:51:42 +03:00
bool ( * shrink ) ( struct drm_gem_object * obj ) ) ;
2022-11-16 17:07:50 +03:00
int drm_gem_evict ( struct drm_gem_object * obj ) ;
2024-02-13 00:04:24 +03:00
/**
* drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
*
* This helper should only be used for fdinfo shared memory stats to determine
* if a GEM object is shared .
*
* @ obj : obj in question
*/
static inline bool drm_gem_object_is_shared_for_memory_stats ( struct drm_gem_object * obj )
{
return ( obj - > handle_count > 1 ) | | obj - > dma_buf ;
}
#ifdef CONFIG_LOCKDEP
/**
 * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
 * @obj: the &drm_gem_object
 * @lock: the lock used to protect the gpuva list. The locking primitive
 * must contain a dep_map field.
 *
 * Call this if you're not protecting access to the gpuva list with the
 * dma-resv lock, but with a custom lock.
 */
#define drm_gem_gpuva_set_lock(obj, lock) \
	if (!WARN((obj)->gpuva.lock_dep_map, \
		  "GEM GPUVA lock should be set only once.")) \
		(obj)->gpuva.lock_dep_map = &(lock)->dep_map
#define drm_gem_gpuva_assert_lock_held(obj) \
	lockdep_assert((obj)->gpuva.lock_dep_map ? \
		       lock_is_held((obj)->gpuva.lock_dep_map) : \
		       dma_resv_held((obj)->resv))
#else
#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
#endif
/**
* drm_gem_gpuva_init ( ) - initialize the gpuva list of a GEM object
* @ obj : the & drm_gem_object
*
2023-11-08 03:12:40 +03:00
* This initializes the & drm_gem_object ' s & drm_gpuvm_bo list .
2023-07-20 03:14:22 +03:00
*
* Calling this function is only necessary for drivers intending to support the
* & drm_driver_feature DRIVER_GEM_GPUVA .
2023-08-04 21:23:41 +03:00
*
* See also drm_gem_gpuva_set_lock ( ) .
2023-07-20 03:14:22 +03:00
*/
static inline void drm_gem_gpuva_init ( struct drm_gem_object * obj )
{
INIT_LIST_HEAD ( & obj - > gpuva . list ) ;
}
/**
 * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object.
 */
#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
	list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
/**
 * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
 * &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @next__: &next &drm_gpuvm_bo to store the next step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 */
#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
	list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)
#endif /* __DRM_GEM_H__ */