/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy, so
	 * don't call ttm_tt_destroy from this callback or it will recurse
	 * into an infinite loop.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
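
/*
 * A driver typically implements these hooks once and points every
 * ttm_tt it creates at the same function table. Illustrative sketch
 * only -- the mydrv_* names below are hypothetical, not part of TTM:
 *
 *	static int mydrv_ttm_bind(struct ttm_tt *ttm,
 *				  struct ttm_mem_reg *bo_mem)
 *	{
 *		(program the GART/aperture entries for ttm->pages here)
 *		return 0;
 *	}
 *
 *	static struct ttm_backend_func mydrv_backend_func = {
 *		.bind	 = mydrv_ttm_bind,
 *		.unbind	 = mydrv_ttm_unbind,
 *		.destroy = mydrv_ttm_destroy,
 *	};
 */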
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};
/**
 * struct ttm_tt
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @sg: Scatter-gather table for SG objects imported via dma-buf.
 * @glob: Pointer to the bo global data.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct page *dummy_read_page;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg;		/* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};
/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @cpu_address: The CPU addresses of the pages.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	void **cpu_address;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};
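
/*
 * Because @ttm is the first member, a driver using the DMA variant can
 * recover its ttm_dma_tt from the ttm_tt passed to the backend hooks
 * with container_of(). Illustrative sketch only -- struct mydrv_ttm is
 * a hypothetical driver wrapper:
 *
 *	struct mydrv_ttm {
 *		struct ttm_dma_tt dma;	(must remain the first member)
 *		(driver-private state)
 *	};
 *
 *	static int mydrv_ttm_bind(struct ttm_tt *ttm,
 *				  struct ttm_mem_reg *bo_mem)
 *	{
 *		struct mydrv_ttm *mtt =
 *			container_of(ttm, struct mydrv_ttm, dma.ttm);
 *		...
 *	}
 */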
#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;
struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @flags: Additional placement flags.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details if
	 * applicable are given by @placement. If successful,
	 * @mem::mm_node should be set to a non-null value, and
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 uint32_t flags,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
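
/*
 * Most drivers do not implement this interface themselves; the default
 * drm_mm-backed range manager, exported below as ttm_bo_manager_func,
 * covers the common case. A driver's init_mem_type hook then only has
 * to point the managed type at it, e.g. (sketch; the caching flags are
 * driver policy):
 *
 *	man->func = &ttm_bo_manager_func;
 *	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 *	man->default_caching = TTM_PL_FLAG_WC;
 */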
/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct mutex io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};
/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */
struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device:
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page);
	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */
	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */
	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if this move would require waiting for the GPU
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move)(struct ttm_buffer_object *bo,
		    bool evict, bool interruptible,
		    bool no_wait_gpu,
		    struct ttm_mem_reg *new_mem);
	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */
	bool (*sync_obj_signaled)(void *sync_obj);
	int (*sync_obj_wait)(void *sync_obj,
			     bool lazy, bool interruptible);
	int (*sync_obj_flush)(void *sync_obj);
	void (*sync_obj_unref)(void **sync_obj);
	void *(*sync_obj_ref)(void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
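
/*
 * A driver exports a single instance of this structure with the hooks
 * it supports filled in. Illustrative sketch only -- every mydrv_*
 * callback below is hypothetical:
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		.ttm_tt_create = &mydrv_ttm_tt_create,
 *		.ttm_tt_populate = &mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = &mydrv_ttm_tt_unpopulate,
 *		.init_mem_type = &mydrv_init_mem_type,
 *		.evict_flags = &mydrv_evict_flags,
 *		.move = &mydrv_bo_move,
 *		.verify_access = &mydrv_verify_access,
 *		.io_mem_reserve = &mydrv_io_mem_reserve,
 *		.io_mem_free = &mydrv_io_mem_free,
 *	};
 *
 * Leaving .move NULL makes TTM fall back to a memcpy move.
 */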
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};
/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};
#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @vma_manager: Address space manager
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	spinlock_t fence_lock;

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager vma_manager;

	/*
	 * Protected by the global:lru lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */
	struct delayed_work wq;

	bool need_dma32;
};
/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
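
/*
 * The XOR trick above copies exactly the bits selected by @mask from
 * @new into *@old while leaving all other bits untouched. Worked
 * example (values are illustrative):
 *
 *	*old = 0xa (1010b), new = 0x5 (0101b), mask = 0x3 (0011b)
 *	*old ^ new          = 1111b
 *	(*old ^ new) & mask = 0011b
 *	*old ^= 0011b      -> 1001b: low two bits from new, rest from old.
 */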
/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device:
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		       unsigned long size, uint32_t page_flags,
		       struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct page *dummy_read_page);

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of ttm_tt structure
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
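
/*
 * Typical use from a driver's ttm_tt_create hook (illustrative sketch
 * only -- struct mydrv_ttm is the hypothetical wrapper from above):
 * allocate the wrapper, initialize the embedded tt, and free the
 * wrapper again on failure:
 *
 *	struct mydrv_ttm *mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
 *
 *	if (!mtt)
 *		return NULL;
 *	mtt->dma.ttm.func = &mydrv_backend_func;
 *	if (ttm_dma_tt_init(&mtt->dma, bdev, size, page_flags,
 *			    dummy_read_page)) {
 *		kfree(mtt);
 *		return NULL;
 *	}
 *	return &mtt->dma.ttm;
 */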

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined,
 * all CPU caches will first be flushed to make sure the data of the pages
 * hit RAM. This function may be very costly as it involves global TLB
 * and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistent_swap_storage);

/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object. the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptibly when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      struct address_space *mapping,
			      uint64_t file_page_offset, bool need_dma32);
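
/*
 * Typical device setup (illustrative sketch only; the mydrv names are
 * hypothetical and DRM_FILE_PAGE_OFFSET is a per-driver constant
 * reserving the mmap offset range used for buffer objects):
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev, glob, &mydrv_bo_driver,
 *				 dev->anon_inode->i_mapping,
 *				 DRM_FILE_PAGE_OFFSET, mydrv->need_dma32);
 */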

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket->stamp is older.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_ticket == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
				   bool interruptible,
				   bool no_wait, bool use_ticket,
				   struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;
		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket->stamp is older.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EDEADLK error
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_ticket == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible,
				 bool no_wait, bool use_ticket,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	WARN_ON(!atomic_read(&bo->kref.refcount));

	ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}
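
/*
 * Simple single-buffer use without a ww ticket (illustrative sketch
 * only):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(validate or access bo->mem here)
 *	ttm_bo_unreserve(bo);
 */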

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: The ww_acquire_ctx that was used when the reservation failed.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!atomic_read(&bo->kref.refcount));

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}
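
/*
 * Sketch of the deadlock-backoff loop over a set of buffers, assuming
 * the caller manages the list walking itself (reservation_ww_class
 * comes from <linux/reservation.h>):
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 * retry:
 *	(for each bo in the set:)
 *		ret = ttm_bo_reserve(bo, true, false, true, &ticket);
 *		if (ret == -EDEADLK) {
 *			(unreserve everything reserved so far)
 *			ttm_bo_reserve_slowpath(bo, true, &ticket);
 *			goto retry;
 *		}
 *	ww_acquire_done(&ticket);
 */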

/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}
	__ttm_bo_unreserve(bo);
}

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @t: ww_acquire_ctx used for reserving
 *
 * Unreserve a previous reservation of @bo made with @t.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *t)
{
	ttm_bo_unreserve(bo);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);
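
/*
 * A driver's move callback commonly tries a hardware blit first and
 * falls back to these helpers. Illustrative sketch only -- the
 * mydrv_copy_blit helper is hypothetical:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		if (mydrv_copy_blit(bo, evict, no_wait_gpu, new_mem) == 0)
 *			return 0;
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */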

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);

/**
 * ttm_io_prot
 *
 * @caching_flags: Caching flags, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
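
/*
 * Example use of ttm_io_prot when building a kernel mapping of a
 * buffer (illustrative sketch only):
 *
 *	pgprot_t prot = ttm_io_prot(bo->mem.placement, PAGE_KERNEL);
 */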

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

#endif