2009-06-10 15:20:19 +02:00
/**************************************************************************
*
* Copyright ( c ) 2006 - 2009 VMware , Inc . , Palo Alto , CA . , USA
* All Rights Reserved .
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the
* " Software " ) , to deal in the Software without restriction , including
* without limitation the rights to use , copy , modify , merge , publish ,
* distribute , sub license , and / or sell copies of the Software , and to
* permit persons to whom the Software is furnished to do so , subject to
* the following conditions :
*
* The above copyright notice and this permission notice ( including the
* next paragraph ) shall be included in all copies or substantial portions
* of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NON - INFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS , AUTHORS AND / OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM ,
* DAMAGES OR OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR
* OTHERWISE , ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE .
*
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Authors : Thomas Hellstrom < thellstrom - at - vmware - dot - com >
*/
# ifndef _TTM_BO_API_H_
# define _TTM_BO_API_H_
2019-08-05 16:01:03 +02:00
# include <drm/drm_gem.h>
2012-10-02 18:01:25 +01:00
# include <drm/drm_hashtab.h>
2013-07-24 21:08:53 +02:00
# include <drm/drm_vma_manager.h>
2009-06-10 15:20:19 +02:00
# include <linux/kref.h>
# include <linux/list.h>
# include <linux/wait.h>
# include <linux/mutex.h>
# include <linux/mm.h>
# include <linux/bitmap.h>
2019-08-11 10:06:32 +02:00
# include <linux/dma-resv.h>
2009-06-10 15:20:19 +02:00
2020-08-03 16:25:15 +02:00
# include "ttm_resource.h"
2020-10-01 14:51:40 +02:00
struct ttm_global ;
2018-02-21 17:26:45 +01:00
2020-10-01 14:51:40 +02:00
struct ttm_device ;
2009-06-10 15:20:19 +02:00
2020-11-03 10:30:10 +01:00
struct dma_buf_map ;
2009-06-10 15:20:19 +02:00
struct drm_mm_node ;
2016-09-08 15:40:38 +02:00
struct ttm_placement ;
2009-12-08 15:33:32 +01:00
2016-08-30 17:26:04 +02:00
struct ttm_place ;
2018-08-06 17:05:30 +08:00
struct ttm_lru_bulk_move ;
2009-06-10 15:20:19 +02:00
/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupy a slot in the
 * device address space, that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};
struct ttm_tt ;
/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @moving: Fence set when BO is moving.
 * @priority: Eviction priority; selects which LRU list the BO lives on.
 * @pin_count: While non-zero the BO is pinned and must not be evicted.
 * @sg: Scatter/gather table when the buffer is backed by a dma-buf sg
 * table shared with another driver (ttm_bo_type_sg).
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member, the API visibility of this object makes it possible
 * to derive driver specific types.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/**
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/**
	 * Members not needing protection.
	 */
	struct kref kref;

	/**
	 * Members protected by the bo::resv::reserved lock.
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;

	/**
	 * Members protected by the bdev::lru_lock.
	 */
	struct list_head lru;
	struct list_head ddestroy;

	/**
	 * Members protected by a bo reservation.
	 */
	struct dma_fence *moving;
	unsigned priority;
	unsigned pin_count;

	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */
	struct sg_table *sg;
};
/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM buffer object backing this mapping.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

/* Bit set in bo_kmap_type when the mapping points at io memory. */
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap = 2,
		ttm_bo_map_kmap = 3,
		ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};
2017-04-12 14:24:39 +02:00
/**
 * struct ttm_operation_ctx
 *
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
 * BOs share the same reservation object.
 * @force_alloc: Don't check the memory account during suspend or CPU page
 * faults. Should only be used by TTM internally.
 * @resv: Reservation object to allow reserved evictions with.
 * @bytes_moved: Statistic updated by TTM; NOTE(review): appears to
 * accumulate the number of bytes moved during validation — confirm.
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
 */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	bool gfp_retry_mayfail;
	bool allow_res_evict;
	bool force_alloc;
	struct dma_resv *resv;
	uint64_t bytes_moved;
};
2018-06-21 15:21:35 +02:00
/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
* @ bo : The buffer object .
*/
static inline void ttm_bo_get ( struct ttm_buffer_object * bo )
{
kref_get ( & bo - > kref ) ;
}
2018-09-26 15:57:46 +02:00
/**
* ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
* its refcount has already reached zero .
* @ bo : The buffer object .
*
* Used to reference a TTM buffer object in lookups where the object is removed
* from the lookup structure during the destructor and for RCU lookups .
*
* Returns : @ bo if the referencing was successful , NULL otherwise .
*/
static inline __must_check struct ttm_buffer_object *
ttm_bo_get_unless_zero ( struct ttm_buffer_object * bo )
{
if ( ! kref_get_unless_zero ( & bo - > kref ) )
return NULL ;
return bo ;
}
2009-06-10 15:20:19 +02:00
/**
* ttm_bo_wait - wait for buffer idle .
*
* @ bo : The buffer object .
* @ interruptible : Use interruptible wait .
* @ no_wait : Return immediately if buffer is busy .
*
* This function must be called with the bo : : mutex held , and makes
* sure any previous rendering to the buffer is completed .
* Note : It might be necessary to block validations before the
* wait by reserving the buffer .
* Returns - EBUSY if no_wait is true and the buffer is busy .
2009-12-07 18:36:18 +01:00
* Returns - ERESTARTSYS if interrupted by a signal .
2009-06-10 15:20:19 +02:00
*/
2017-02-16 14:25:30 +01:00
int ttm_bo_wait ( struct ttm_buffer_object * bo , bool interruptible , bool no_wait ) ;
2016-06-29 12:58:49 -07:00
2020-09-23 13:04:49 +10:00
static inline int ttm_bo_wait_ctx ( struct ttm_buffer_object * bo , struct ttm_operation_ctx * ctx )
{
return ttm_bo_wait ( bo , ctx - > interruptible , ctx - > no_wait_gpu ) ;
}
2016-06-29 12:58:49 -07:00
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
*
* @placement: The proposed placement to check the bo against.
2020-08-04 12:56:32 +10:00
* @ mem : The struct ttm_resource indicating the region where the bo resides
2016-06-29 12:58:49 -07:00
* @ new_flags : Describes compatible placement found
*
* Returns true if the placement is compatible
*/
2020-08-04 12:56:32 +10:00
bool ttm_bo_mem_compat ( struct ttm_placement * placement , struct ttm_resource * mem ,
2017-02-16 14:25:30 +01:00
uint32_t * new_flags ) ;
2016-06-29 12:58:49 -07:00
2009-06-10 15:20:19 +02:00
/**
2009-12-10 17:16:27 +01:00
* ttm_bo_validate
2009-06-10 15:20:19 +02:00
*
* @ bo : The buffer object .
2009-12-08 15:33:32 +01:00
* @ placement : Proposed placement for the buffer object .
2017-04-12 14:24:39 +02:00
* @ ctx : validation parameters .
2009-06-10 15:20:19 +02:00
*
* Changes placement and caching policy of the buffer object
2009-12-08 15:33:32 +01:00
* according proposed placement .
2009-06-10 15:20:19 +02:00
* Returns
2009-12-08 15:33:32 +01:00
* - EINVAL on invalid proposed placement .
2009-06-10 15:20:19 +02:00
* - ENOMEM on out - of - memory condition .
* - EBUSY if no_wait is true and buffer busy .
2009-12-07 18:36:18 +01:00
* - ERESTARTSYS if interrupted by a signal .
2009-06-10 15:20:19 +02:00
*/
2017-02-16 14:25:30 +01:00
int ttm_bo_validate ( struct ttm_buffer_object * bo ,
struct ttm_placement * placement ,
2017-04-12 14:24:39 +02:00
struct ttm_operation_ctx * ctx ) ;
2009-12-08 15:33:32 +01:00
2018-06-21 15:21:35 +02:00
/**
* ttm_bo_put
*
* @ bo : The buffer object .
*
* Unreference a buffer object .
*/
void ttm_bo_put ( struct ttm_buffer_object * bo ) ;
2016-01-11 15:35:20 +01:00
/**
* ttm_bo_move_to_lru_tail
*
* @ bo : The buffer object .
2020-11-27 15:14:34 +01:00
* @ mem : Resource object .
2018-08-06 17:05:30 +08:00
* @ bulk : optional bulk move structure to remember BO positions
2016-01-11 15:35:20 +01:00
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
2020-10-01 14:51:40 +02:00
* object . This function must be called with struct ttm_global : : lru_lock
2016-01-11 15:35:20 +01:00
* held , and is used to make a BO less likely to be considered for eviction .
*/
2018-08-06 17:05:30 +08:00
void ttm_bo_move_to_lru_tail ( struct ttm_buffer_object * bo ,
2020-11-27 15:14:34 +01:00
struct ttm_resource * mem ,
2018-08-06 17:05:30 +08:00
struct ttm_lru_bulk_move * bulk ) ;
2010-11-22 13:24:40 +10:00
2018-08-06 17:28:35 +08:00
/**
* ttm_bo_bulk_move_lru_tail
*
* @ bulk : bulk move structure
*
* Bulk move BOs to the LRU tail , only valid to use when driver makes sure that
2020-10-01 14:51:40 +02:00
* BO order never changes . Should be called with ttm_global : : lru_lock held .
2018-08-06 17:28:35 +08:00
*/
void ttm_bo_bulk_move_lru_tail ( struct ttm_lru_bulk_move * bulk ) ;
2010-04-26 16:00:09 -04:00
/**
* ttm_bo_lock_delayed_workqueue
*
* Prevent the delayed workqueue from running .
* Returns
* True if the workqueue was queued at the time
*/
2020-10-01 14:51:40 +02:00
int ttm_bo_lock_delayed_workqueue ( struct ttm_device * bdev ) ;
2010-04-26 16:00:09 -04:00
/**
* ttm_bo_unlock_delayed_workqueue
*
* Allows the delayed workqueue to run .
*/
2020-10-01 14:51:40 +02:00
void ttm_bo_unlock_delayed_workqueue ( struct ttm_device * bdev , int resched ) ;
2010-04-26 16:00:09 -04:00
2016-08-30 17:26:04 +02:00
/**
* ttm_bo_eviction_valuable
*
* @ bo : The buffer object to evict
* @ place : the placement we need to make room for
*
* Check if it is valuable to evict the BO to make room for the given placement .
*/
bool ttm_bo_eviction_valuable ( struct ttm_buffer_object * bo ,
const struct ttm_place * place ) ;
2017-02-16 10:56:40 +01:00
/**
* ttm_bo_init_reserved
*
2020-10-01 14:51:40 +02:00
* @ bdev : Pointer to a ttm_device struct .
2017-02-16 10:56:40 +01:00
* @ bo : Pointer to a ttm_buffer_object to be initialized .
* @ size : Requested size of buffer object .
* @ type : Requested type of buffer object .
* @ flags : Initial placement flags .
* @ page_alignment : Data alignment in pages .
2017-04-12 14:41:43 +02:00
* @ ctx : TTM operation context for memory allocation .
2019-08-11 10:06:32 +02:00
* @ resv : Pointer to a dma_resv , or NULL to let ttm allocate one .
2017-02-16 10:56:40 +01:00
* @ destroy : Destroy function . Use NULL for kfree ( ) .
*
* This function initializes a pre - allocated struct ttm_buffer_object .
* As this object may be part of a larger structure , this function ,
* together with the @ destroy function ,
* enables driver - specific objects derived from a ttm_buffer_object .
*
* On successful return , the caller owns an object kref to @ bo . The kref and
* list_kref are usually set to 1 , but note that in some situations , other
* tasks may already be holding references to @ bo as well .
* Furthermore , if resv = = NULL , the buffer ' s reservation lock will be held ,
* and it is the caller ' s responsibility to call ttm_bo_unreserve .
*
* If a failure occurs , the function will call the @ destroy function , or
* kfree ( ) if @ destroy is NULL . Thus , after a failure , dereferencing @ bo is
* illegal and will likely cause memory corruption .
*
* Returns
* - ENOMEM : Out of memory .
* - EINVAL : Invalid placement flags .
* - ERESTARTSYS : Interrupted by signal while sleeping waiting for resources .
*/
2020-10-01 14:51:40 +02:00
int ttm_bo_init_reserved ( struct ttm_device * bdev ,
2017-02-16 14:25:30 +01:00
struct ttm_buffer_object * bo ,
2020-12-09 15:07:50 +01:00
size_t size , enum ttm_bo_type type ,
2017-02-16 14:25:30 +01:00
struct ttm_placement * placement ,
uint32_t page_alignment ,
2017-04-12 14:41:43 +02:00
struct ttm_operation_ctx * ctx ,
2020-11-17 13:52:28 +01:00
struct sg_table * sg , struct dma_resv * resv ,
2017-02-16 14:25:30 +01:00
void ( * destroy ) ( struct ttm_buffer_object * ) ) ;
2017-02-16 10:56:40 +01:00
2009-06-10 15:20:19 +02:00
/**
2009-12-10 17:16:27 +01:00
* ttm_bo_init
2009-06-10 15:20:19 +02:00
*
2020-10-01 14:51:40 +02:00
* @ bdev : Pointer to a ttm_device struct .
2009-06-10 15:20:19 +02:00
* @ bo : Pointer to a ttm_buffer_object to be initialized .
* @ size : Requested size of buffer object .
* @ type : Requested type of buffer object .
* @ flags : Initial placement flags .
* @ page_alignment : Data alignment in pages .
* @ interruptible : If needing to sleep to wait for GPU resources ,
* sleep interruptible .
* pinned in physical memory . If this behaviour is not desired , this member
2011-04-04 01:25:18 +02:00
* holds a pointer to a persistent shmem object . Typically , this would
2009-06-10 15:20:19 +02:00
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface .
2019-08-11 10:06:32 +02:00
* @ resv : Pointer to a dma_resv , or NULL to let ttm allocate one .
2009-06-10 15:20:19 +02:00
* @ destroy : Destroy function . Use NULL for kfree ( ) .
*
* This function initializes a pre - allocated struct ttm_buffer_object .
* As this object may be part of a larger structure , this function ,
* together with the @ destroy function ,
* enables driver - specific objects derived from a ttm_buffer_object .
2017-02-14 10:37:41 +01:00
*
* On successful return , the caller owns an object kref to @ bo . The kref and
* list_kref are usually set to 1 , but note that in some situations , other
* tasks may already be holding references to @ bo as well .
*
2010-11-09 21:31:44 +01:00
* If a failure occurs , the function will call the @ destroy function , or
* kfree ( ) if @ destroy is NULL . Thus , after a failure , dereferencing @ bo is
* illegal and will likely cause memory corruption .
*
2009-06-10 15:20:19 +02:00
* Returns
* - ENOMEM : Out of memory .
* - EINVAL : Invalid placement flags .
2009-12-07 18:36:18 +01:00
* - ERESTARTSYS : Interrupted by signal while sleeping waiting for resources .
2009-06-10 15:20:19 +02:00
*/
2020-10-01 14:51:40 +02:00
int ttm_bo_init ( struct ttm_device * bdev , struct ttm_buffer_object * bo ,
2020-12-09 15:07:50 +01:00
size_t size , enum ttm_bo_type type ,
2017-02-16 14:25:30 +01:00
struct ttm_placement * placement ,
2020-11-17 13:52:28 +01:00
uint32_t page_alignment , bool interrubtible ,
2019-08-11 10:06:32 +02:00
struct sg_table * sg , struct dma_resv * resv ,
2017-02-16 14:25:30 +01:00
void ( * destroy ) ( struct ttm_buffer_object * ) ) ;
2011-11-11 15:42:57 -05:00
2009-06-10 15:20:19 +02:00
/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to an integer that on return indicates 1 if the
 * virtual map is io memory, 0 if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is 1 on return, the virtual address points to an io memory area,
 * that should strictly be accessed by the iowriteXX() and similar functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = (map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK) != 0;

	return map->virtual;
}
/**
* ttm_bo_kmap
*
* @ bo : The buffer object .
* @ start_page : The first page to map .
* @ num_pages : Number of pages to map .
* @ map : pointer to a struct ttm_bo_kmap_obj representing the map .
*
* Sets up a kernel virtual mapping , using ioremap , vmap or kmap to the
* data in the buffer object . The ttm_kmap_obj_virtual function can then be
* used to obtain a virtual address to the data .
*
* Returns
* - ENOMEM : Out of memory .
* - EINVAL : Invalid range .
*/
2017-02-16 14:25:30 +01:00
int ttm_bo_kmap ( struct ttm_buffer_object * bo , unsigned long start_page ,
unsigned long num_pages , struct ttm_bo_kmap_obj * map ) ;
2009-06-10 15:20:19 +02:00
/**
* ttm_bo_kunmap
*
* @ map : Object describing the map to unmap .
*
* Unmaps a kernel map set up by ttm_bo_kmap .
*/
2017-02-16 14:25:30 +01:00
void ttm_bo_kunmap ( struct ttm_bo_kmap_obj * map ) ;
2009-06-10 15:20:19 +02:00
2020-11-03 10:30:10 +01:00
/**
* ttm_bo_vmap
*
* @ bo : The buffer object .
* @ map : pointer to a struct dma_buf_map representing the map .
*
* Sets up a kernel virtual mapping , using ioremap or vmap to the
* data in the buffer object . The parameter @ map returns the virtual
* address as struct dma_buf_map . Unmap the buffer with ttm_bo_vunmap ( ) .
*
* Returns
* - ENOMEM : Out of memory .
* - EINVAL : Invalid range .
*/
int ttm_bo_vmap ( struct ttm_buffer_object * bo , struct dma_buf_map * map ) ;
/**
* ttm_bo_vunmap
*
* @ bo : The buffer object .
* @ map : Object describing the map to unmap .
*
* Unmaps a kernel map set up by ttm_bo_vmap ( ) .
*/
void ttm_bo_vunmap ( struct ttm_buffer_object * bo , struct dma_buf_map * map ) ;
2009-06-10 15:20:19 +02:00
/**
2019-10-16 13:51:59 +02:00
* ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object .
2009-06-10 15:20:19 +02:00
*
* @ vma : vma as input from the fbdev mmap method .
2019-10-16 13:51:59 +02:00
* @ bo : The bo backing the address space .
2009-06-10 15:20:19 +02:00
*
2019-10-16 13:51:59 +02:00
* Maps a buffer object .
2009-06-10 15:20:19 +02:00
*/
2019-10-16 13:51:59 +02:00
int ttm_bo_mmap_obj ( struct vm_area_struct * vma , struct ttm_buffer_object * bo ) ;
2009-06-10 15:20:19 +02:00
/**
* ttm_bo_io
*
2020-10-01 14:51:40 +02:00
* @ bdev : Pointer to the struct ttm_device .
2009-06-10 15:20:19 +02:00
* @ filp : Pointer to the struct file attempting to read / write .
* @ wbuf : User - space pointer to address of buffer to write . NULL on read .
* @ rbuf : User - space pointer to address of buffer to read into .
* Null on write .
* @ count : Number of bytes to read / write .
* @ f_pos : Pointer to current file position .
* @write: 1 for write, 0 for read.
*
* This function implements read / write into ttm buffer objects , and is
* intended to
* be called from the fops : : read and fops : : write method .
* Returns :
* See man ( 2 ) write , man ( 2 ) read . In particular ,
2009-12-07 18:36:18 +01:00
* the function may return - ERESTARTSYS if
2009-06-10 15:20:19 +02:00
* interrupted by a signal .
*/
2020-10-01 14:51:40 +02:00
ssize_t ttm_bo_io ( struct ttm_device * bdev , struct file * filp ,
2017-02-16 14:25:30 +01:00
const char __user * wbuf , char __user * rbuf ,
size_t count , loff_t * f_pos , bool write ) ;
2009-06-10 15:20:19 +02:00
2020-10-06 13:35:32 +02:00
int ttm_bo_swapout ( struct ttm_buffer_object * bo , struct ttm_operation_ctx * ctx ,
gfp_t gfp_flags ) ;
2019-08-05 16:01:03 +02:00
2020-09-21 13:05:54 +02:00
/**
* ttm_bo_pin - Pin the buffer object .
* @ bo : The buffer object to pin
*
* Make sure the buffer is not evicted any more during memory pressure .
*/
static inline void ttm_bo_pin ( struct ttm_buffer_object * bo )
{
dma_resv_assert_held ( bo - > base . resv ) ;
2020-10-28 12:31:20 +01:00
WARN_ON_ONCE ( ! kref_read ( & bo - > kref ) ) ;
2020-09-21 13:05:54 +02:00
+ + bo - > pin_count ;
}
/**
* ttm_bo_unpin - Unpin the buffer object .
* @ bo : The buffer object to unpin
*
* Allows the buffer object to be evicted again during memory pressure .
*/
static inline void ttm_bo_unpin ( struct ttm_buffer_object * bo )
{
dma_resv_assert_held ( bo - > base . resv ) ;
2020-10-28 12:31:20 +01:00
WARN_ON_ONCE ( ! kref_read ( & bo - > kref ) ) ;
2021-03-12 09:34:39 +01:00
if ( bo - > pin_count )
- - bo - > pin_count ;
else
WARN_ON_ONCE ( true ) ;
2020-09-21 13:05:54 +02:00
}
2020-10-01 14:51:40 +02:00
int ttm_mem_evict_first ( struct ttm_device * bdev ,
2020-08-03 16:25:15 +02:00
struct ttm_resource_manager * man ,
const struct ttm_place * place ,
struct ttm_operation_ctx * ctx ,
struct ww_acquire_ctx * ticket ) ;
2019-09-25 15:11:23 +02:00
/* Default number of pre-faulted pages in the TTM fault handler */
# define TTM_BO_VM_NUM_PREFAULT 16
vm_fault_t ttm_bo_vm_reserve ( struct ttm_buffer_object * bo ,
struct vm_fault * vmf ) ;
vm_fault_t ttm_bo_vm_fault_reserved ( struct vm_fault * vmf ,
pgprot_t prot ,
2020-03-24 18:48:33 +01:00
pgoff_t num_prefault ,
pgoff_t fault_page_size ) ;
2019-09-25 15:11:23 +02:00
2019-09-27 14:34:24 +02:00
vm_fault_t ttm_bo_vm_fault ( struct vm_fault * vmf ) ;
2019-09-25 15:11:23 +02:00
void ttm_bo_vm_open ( struct vm_area_struct * vma ) ;
void ttm_bo_vm_close ( struct vm_area_struct * vma ) ;
2019-09-27 14:34:24 +02:00
int ttm_bo_vm_access ( struct vm_area_struct * vma , unsigned long addr ,
void * buf , int len , int write ) ;
2020-10-01 14:51:40 +02:00
bool ttm_bo_delayed_delete ( struct ttm_device * bdev , bool remove_all ) ;
2019-09-27 14:34:24 +02:00
2021-05-12 10:26:33 -04:00
vm_fault_t ttm_bo_vm_dummy_page ( struct vm_fault * vmf , pgprot_t prot ) ;
2009-06-10 15:20:19 +02:00
# endif