/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"

#define TTM_MAX_BO_PRIORITY	4U

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @place: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by @place.
	 * If successful, @mem::mm_node should be set to a non-null value,
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem);
	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the space to free.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);
	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @printer: Printer to use for the debug output.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer);
};
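
/*
 * Example (illustrative sketch, not part of the API): a minimal get_node
 * implementation backed by a struct drm_mm, in the spirit of the generic
 * range manager. All "example_" names are hypothetical, and error handling
 * is reduced to the essentials.
 *
 *	struct example_manager {
 *		struct drm_mm mm;
 *		spinlock_t lock;
 *	};
 *
 *	static int example_get_node(struct ttm_mem_type_manager *man,
 *				    struct ttm_buffer_object *bo,
 *				    const struct ttm_place *place,
 *				    struct ttm_mem_reg *mem)
 *	{
 *		struct example_manager *emgr = man->priv;
 *		struct drm_mm_node *node;
 *		int ret;
 *
 *		node = kzalloc(sizeof(*node), GFP_KERNEL);
 *		if (!node)
 *			return -ENOMEM;
 *
 *		spin_lock(&emgr->lock);
 *		ret = drm_mm_insert_node_in_range(&emgr->mm, node,
 *						  mem->num_pages,
 *						  mem->page_alignment, 0,
 *						  place->fpfn,
 *						  place->lpfn ?: man->size,
 *						  DRM_MM_INSERT_BEST);
 *		spin_unlock(&emgr->lock);
 *
 *		if (ret) {
 *			kfree(node);
 *			mem->mm_node = NULL;
 *			return ret == -ENOSPC ? 0 : ret;
 *		}
 *
 *		mem->mm_node = node;
 *		mem->start = node->start;
 *		return 0;
 *	}
 */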

/**
 * struct ttm_mem_type_manager
 *
 * @bdev: Pointer to the parent struct ttm_bo_device.
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @use_tt: The memory type is backed by system pages through a struct ttm_tt.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @move_lock: lock for move fence
 * @lru: The lru list for this memory type.
 * @move: The fence of the last pipelined move operation.
 *
 * This structure is used to identify and manage memory types for a device.
 */
struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */
	bool has_type;
	bool use_type;
	bool use_tt;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct mutex io_reserve_mutex;
	bool use_io_reserve_lru;
	spinlock_t move_lock;

	/*
	 * Protected by @io_reserve_mutex:
	 */
	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */
	struct list_head lru[TTM_MAX_BO_PRIORITY];

	/*
	 * Protected by @move_lock.
	 */
	struct dma_fence *move;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */
struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);
	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 * @ctx: TTM operation context.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt containing the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */
	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map/write/read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);
	/**
	 * Hook to notify driver about a driver move so it
	 * can do tiling things and book-keeping.
	 *
	 * @evict: whether this move is evicting the buffer from the graphics
	 * address space
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *new_mem);

	/*
	 * Notify the driver that we are taking a fault on this BO
	 * and have reserved it.
	 */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to the BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * struct ttm_bo_driver member del_from_lru_notify
	 *
	 * @bo: the buffer object deleted from lru
	 *
	 * Notify the driver that a BO was deleted from the LRU.
	 */
	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};
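
/*
 * Example (illustrative sketch): wiring up a minimal ttm_bo_driver.
 * The "example_" callbacks are hypothetical driver functions;
 * ttm_bo_eviction_valuable() is the generic helper declared in
 * ttm_bo_api.h.
 *
 *	static struct ttm_bo_driver example_bo_driver = {
 *		.ttm_tt_create = example_ttm_tt_create,
 *		.ttm_tt_populate = example_ttm_tt_populate,
 *		.ttm_tt_unpopulate = example_ttm_tt_unpopulate,
 *		.eviction_valuable = ttm_bo_eviction_valuable,
 *		.evict_flags = example_evict_flags,
 *		.move = example_move,
 *		.verify_access = example_verify_access,
 *		.io_mem_reserve = example_io_mem_reserve,
 *		.io_mem_free = example_io_mem_free,
 *	};
 */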

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */
extern struct ttm_bo_global {

	/**
	 * Constant after init.
	 */
	struct kobject kobj;
	struct page *dummy_read_page;
	spinlock_t lru_lock;

	/**
	 * Protected by ttm_global_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
} ttm_bo_glob;

#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager (pointer).
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 * @no_retry: Don't retry allocation if it fails
 *
 */
struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager *vma_manager;

	/*
	 * Protected by the global lru lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */
	struct delayed_work wq;

	bool need_dma32;

	bool no_retry;
};

/**
 * struct ttm_lru_bulk_move_pos
 *
 * @first: first BO in the bulk move range
 * @last: last BO in the bulk move range
 *
 * Positions for a lru bulk move.
 */
struct ttm_lru_bulk_move_pos {
	struct ttm_buffer_object *first;
	struct ttm_buffer_object *last;
};

/**
 * struct ttm_lru_bulk_move
 *
 * @tt: first/last lru entry for BOs in the TT domain
 * @vram: first/last lru entry for BOs in the VRAM domain
 * @swap: first/last lru entry for BOs on the swap list
 *
 * Helper structure for bulk moves on the LRU list.
 */
struct ttm_lru_bulk_move {
	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
};
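
/*
 * Example (illustrative sketch): batching LRU updates with a bulk move.
 * "first_bo" and "second_bo" are hypothetical, already reserved BOs;
 * ttm_bo_move_to_lru_tail() and ttm_bo_bulk_move_lru_tail() are declared
 * in ttm_bo_api.h.
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&ttm_bo_glob.lru_lock);
 *	ttm_bo_move_to_lru_tail(first_bo, &bulk);
 *	ttm_bo_move_to_lru_tail(second_bo, &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&ttm_bo_glob.lru_lock);
 */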

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */
static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
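
/*
 * Example: replace only the caching bits of a placement flag word,
 * leaving all other bits untouched (illustrative values, macros from
 * ttm_placement.h).
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *	flags now equals TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC.
 */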

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @ctx: Operation context, controlling interruptible sleeps and whether
 * to return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);

int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @need_dma32: Whether page allocations should be restricted to the
 * DMA32 zone.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       struct drm_vma_offset_manager *vma_manager,
		       bool need_dma32);
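
/*
 * Example (illustrative sketch): typical bring-up from a DRM driver,
 * where "example_dev" is hypothetical driver state embedding a
 * struct drm_device "drm" and a struct ttm_bo_device "bdev".
 *
 *	ret = ttm_bo_device_init(&example_dev->bdev,
 *				 &example_bo_driver,
 *				 example_dev->drm.anon_inode->i_mapping,
 *				 example_dev->drm.vma_offset_manager,
 *				 false);
 */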

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}
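
/*
 * Example (illustrative): the common reserve/access/unreserve pattern
 * without a ww_acquire_ctx ticket.
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	(operate on the reserved bo here)
 *	ttm_bo_unreserve(bo);
 */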

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
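
/*
 * Example (illustrative): recovering from -EDEADLK via the slowpath.
 *
 *	ret = ttm_bo_reserve(bo, true, false, ticket);
 *	if (ret == -EDEADLK) {
 *		(release all other reservations held under @ticket, then)
 *		ret = ttm_bo_reserve_slowpath(bo, true, ticket);
 *	}
 */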

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_mem_type_manager_set_used
 *
 * @man: A memory manager object.
 * @used: usage state to set.
 *
 * Set the manager in use flag. If disabled the manager is no longer
 * used for object placement.
 */
static inline void
ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *man, bool used)
{
	man->has_type = true;
	man->use_type = used;
}

/**
 * ttm_mem_type_manager_disable
 *
 * @man: A memory manager object.
 *
 * Indicate the manager is not to be used and deregistered (temporary
 * during rework).
 */
static inline void
ttm_mem_type_manager_disable(struct ttm_mem_type_manager *man)
{
	man->has_type = false;
	man->use_type = false;
}

/**
 * ttm_mem_type_manager_cleanup
 *
 * @man: A memory manager object.
 *
 * Cleanup the move fences from the memory manager object.
 */
static inline void
ttm_mem_type_manager_cleanup(struct ttm_mem_type_manager *man)
{
	dma_fence_put(man->move);
	man->move = NULL;
}
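
/*
 * Example (illustrative): typical teardown ordering for a manager,
 * assuming all BOs have already been evicted from it.
 *
 *	ttm_mem_type_manager_disable(man);
 *	ttm_mem_type_manager_cleanup(man);
 */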

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context, controlling interruptible sleeps and whether
 * to return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context, controlling interruptible sleeps and whether
 * to return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_move
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_gutting
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting a BO of its backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

/**
 * ttm_io_prot
 *
 * @caching_flags: Caching flags, TTM_PL_FLAG_*.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
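
/*
 * Example (illustrative): choosing PTE protection for a kernel mapping
 * of a BO's current placement.
 *
 *	pgprot_t prot = ttm_io_prot(bo->mem.placement, PAGE_KERNEL);
 */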

/**
 * ttm_range_man_init
 *
 * @bdev: ttm device
 * @man: the manager to initialise with the range manager.
 * @p_size: size of area to be managed in pages.
 *
 * Initialise a generic range manager for the selected memory type.
 * The range manager is installed for this device in the type slot.
 */
int ttm_range_man_init(struct ttm_bo_device *bdev,
		       struct ttm_mem_type_manager *man,
		       unsigned long p_size);
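
/*
 * Example (illustrative sketch): initialising a VRAM range manager,
 * where "vram_pages" is a hypothetical size in pages.
 *
 *	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
 *
 *	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 *	man->default_caching = TTM_PL_FLAG_WC;
 *	ret = ttm_range_man_init(bdev, man, vram_pages);
 */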

/**
 * ttm_mem_type_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man,
				struct drm_printer *p);

#endif