/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}
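
/*
 * For example, placement flags are one bit per memory type, so
 * TTM_PL_FLAG_VRAM == (1 << TTM_PL_VRAM), and
 * ttm_mem_type_from_flags(TTM_PL_FLAG_VRAM, &mem_type) returns 0
 * with mem_type == TTM_PL_VRAM.
 */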
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
		man->available_caching);
	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
		man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
		bo, bo->mem.num_pages, bo->mem.size >> 10,
		bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
			i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		return wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
		return 0;
	}
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}
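
/*
 * The count returned above is the number of list references that were
 * dropped. Callers release them in one go with ttm_bo_list_ref_sub()
 * once the lru lock has been released.
 */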
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(sequence - bo->val_seq < (1 << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
			wake_up_all(&bo->event_queue);

		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);
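
/*
 * A note on the sequence tests in ttm_bo_reserve_locked() above: val_seq
 * comparisons are done modulo 2^32, so "sequence - bo->val_seq < (1 << 31)"
 * is true exactly when @sequence is equal to or newer than the holder's
 * bo->val_seq, even across wraparound. For example, with
 * bo->val_seq == 0xfffffffe and sequence == 1 the difference is 3, so the
 * newer reservation attempt correctly backs off with -EAGAIN.
 */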
static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return ret;
}

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_unreserve_locked(bo);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
/**
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);

	/*
	 * Make processes trying to reserve really pick it up.
	 */
	smp_mb__after_atomic_dec();
	wake_up_all(&bo->event_queue);
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver;
	void *sync_obj = NULL;
	void *sync_obj_arg;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!bo->sync_obj) {

		spin_lock(&glob->lru_lock);

		/**
		 * Lock inversion between bo:reserve and bdev::fence_lock here,
		 * but that's OK, since we're only trylocking.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

		if (unlikely(ret == -EBUSY))
			goto queue;

		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	} else {
		spin_lock(&glob->lru_lock);
	}
queue:
	driver = bdev->driver;
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	sync_obj_arg = bo->sync_obj_arg;

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj, sync_obj_arg);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
/**
 * function ttm_bo_cleanup_refs
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool no_wait_reserve,
			       bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret = 0;

retry:
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0))
		return ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible,
				    no_wait_reserve, false, 0);

	if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	/**
	 * We can re-check for sync object without taking
	 * the bo::lock since setting the sync object requires
	 * also bo::reserved. A busy object at this point may
	 * be caused by another thread recently starting an accelerated
	 * eviction.
	 */

	if (unlikely(bo->sync_obj)) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
					  !remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved = false;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait_reserve,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(bo, interruptible,
					  no_wait_reserve, no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);

		if (likely(ret == 0 || ret == -ERESTARTSYS))
			return ret;

		goto retry;
	}

	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_gpu))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_reserve,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
						no_wait_reserve, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}
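
/*
 * Example of the selection above: a buffer currently in write-combined
 * memory (cur_placement includes TTM_PL_FLAG_WC) that is proposed
 * TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED keeps TTM_PL_FLAG_WC, avoiding a
 * needless caching-attribute transition.
 */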
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_reserve, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	return wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved = false;

	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}
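
/*
 * For example, a bo of 1024 pages checked against a placement with
 * fpfn == 0 and lpfn == 512 cannot fit in the requested range and would
 * trip the BUG_ON above.
 */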
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistant_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved = false;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}
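
/*
 * Worked example for the accounting size above, assuming 4 KiB pages and
 * 8-byte pointers: a 4 MiB buffer spans 1024 pages, so its page array
 * needs 8192 bytes (rounded up to a whole page), giving an accounted size
 * of glob->ttm_bo_size + 2 * 8192 bytes.
 */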
int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			unsigned long buffer_start,
			bool interruptible,
			struct file *persistant_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	int ret;

	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
				buffer_start, interruptible,
				persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
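
/*
 * Illustrative driver-side use of ttm_bo_create(); the placement values
 * below are assumptions for the sketch, not taken from any particular
 * driver:
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
 *	struct ttm_placement placement = {
 *		.fpfn = 0, .lpfn = 0,
 *		.num_placement = 1, .placement = &flags,
 *		.num_busy_placement = 1, .busy_placement = &flags,
 *	};
 *	struct ttm_buffer_object *bo;
 *	int ret = ttm_bo_create(bdev, 4096, ttm_bo_type_kernel, &placement,
 *				0, 0, false, NULL, &bo);
 *	if (ret == 0)
 *		... use bo, then drop the reference with ttm_bo_unref(&bo);
 */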
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printk(KERN_ERR TTM_PFX
					"Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_no_shrink;
	}

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
			return ret;
		}
		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);