/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include <drm/ttm/ttm_device.h>

#include "ttm_bo_api.h"
#include "ttm_kmap_iter.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"

/**
 * struct ttm_lru_bulk_move_pos
 *
 * @first: first BO in the bulk move range
 * @last: last BO in the bulk move range
 *
 * Positions for a lru bulk move.
 */
struct ttm_lru_bulk_move_pos {
	struct ttm_buffer_object *first;
	struct ttm_buffer_object *last;
};

/**
 * struct ttm_lru_bulk_move
 *
 * @tt: first/last lru entry for BOs in the TT domain
 * @vram: first/last lru entry for BOs in the VRAM domain
 *
 * Helper structure for bulk moves on the LRU list.
 */
struct ttm_lru_bulk_move {
	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
};
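
/*
 * Illustrative sketch (not part of the upstream header): a driver that
 * wants a group of BOs to stay adjacent on the LRU can zero a bulk-move
 * structure, re-add each BO to the LRU tail with the bulk argument and
 * then splice the whole range in one step via ttm_bo_bulk_move_lru_tail()
 * from ttm_bo_api.h. The "bdev", "bos" list and "my_bo_entry" type below
 * are hypothetical driver state.
 *
 *	struct ttm_lru_bulk_move bulk;
 *	struct my_bo_entry *entry;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&bdev->lru_lock);
 *	list_for_each_entry(entry, &bos, list)
 *		ttm_bo_move_to_lru_tail(entry->bo, entry->bo->resource, &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&bdev->lru_lock);
 */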

/*
 * ttm_bo.c
 */

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_resource.
 * @ctx: Operation context; controls whether sleeps are interruptible
 * and whether to return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @ctx->no_wait_gpu is true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);
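
/*
 * Usage sketch (assumptions: @placement filled in by the driver, BO already
 * reserved). The operation context selects between interruptible waits and
 * an immediate -EBUSY return:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	struct ttm_resource *new_mem;
 *	int ret;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &new_mem, &ctx);
 *	if (ret)
 *		return ret;
 *	... move the BO's data into new_mem ...
 */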

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (i.e. prevents other processes
 * from locking it for validation), while taking a number of measures to
 * prevent deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}
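
/*
 * Usage sketch: for a single BO no ww_acquire_ctx is needed and NULL can be
 * passed as the ticket; pair every successful reserve with ttm_bo_unreserve()
 * (defined further down in this header):
 *
 *	int ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	... validate or inspect the BO ...
 *	ttm_bo_unreserve(bo);
 */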

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);

		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
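
/*
 * Sketch of the multi-BO backoff dance that -EDEADLK is designed for:
 * drop every reservation already held, then re-take the contended BO via
 * the slowpath before retrying. "unlock_all_held()" is a hypothetical
 * stand-in for driver-specific cleanup:
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	ret = ttm_bo_reserve(bo, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		unlock_all_held();
 *		ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
 *	}
 */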

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}
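
/*
 * Sketch: a driver's move callback can use ttm_bo_move_null() when there is
 * no data to preserve, e.g. a system-domain BO whose pages were never
 * populated (an assumed check, mirroring what several drivers do):
 *
 *	if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
 *		ttm_bo_move_null(bo, new_mem);
 *		return 0;
 *	}
 */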

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context; controls whether sleeps are interruptible
 * and whether to return immediately if the GPU is busy.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
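
/*
 * Sketch: drivers typically fall back to ttm_bo_move_memcpy() from their
 * move callback when no hardware copy path covers the source/destination
 * combination; "my_hw_can_copy()" is a hypothetical capability check:
 *
 *	if (!my_hw_can_copy(bo->resource, new_mem))
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 */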

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
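
/*
 * Sketch: after scheduling a hardware blit, hand the returned fence to
 * ttm_bo_move_accel_cleanup() so TTM can retire the old placement once the
 * copy signals. "my_schedule_copy()" is a hypothetical driver helper that
 * returns a struct dma_fence *:
 *
 *	struct dma_fence *fence = my_schedule_copy(bo, new_mem);
 *	int ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *
 *	dma_fence_put(fence);
 *	return ret;
 */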

/**
 * ttm_bo_move_sync_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
					    struct ttm_resource *new_mem)
{
	int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);

	WARN_ON(ret);
}

/**
 * ttm_bo_pipeline_gutting.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting of a BO, i.e. stripping it of its backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);

/**
 * ttm_bo_tt_bind
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @mem: The memory resource to bind the object's tt to.
 *
 * Bind the object's tt to a memory resource.
 */
int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);

/**
 * ttm_bo_tt_destroy.
 *
 * @bo: A pointer to a struct ttm_buffer_object whose tt is to be destroyed.
 *
 * Destroy the tt backing @bo.
 */
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);

struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start);
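
/*
 * Usage sketch: the kmap iterators let the generic page-by-page copy run
 * between a struct ttm_tt and an io_mapping'd aperture. "iomap", "sgt" and
 * "vram_start" are hypothetical driver state; ttm_kmap_iter_tt_init() is
 * declared in ttm_tt.h:
 *
 *	struct ttm_kmap_iter_tt src_tt;
 *	struct ttm_kmap_iter_iomap dst_io;
 *	struct ttm_kmap_iter *src, *dst;
 *
 *	src = ttm_kmap_iter_tt_init(&src_tt, bo->ttm);
 *	dst = ttm_kmap_iter_iomap_init(&dst_io, iomap, sgt, vram_start);
 *	ttm_move_memcpy(false, new_mem->num_pages, dst, src);
 */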

#endif