/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo owns a
 * specific node. There is no protection on the pointer, so to make sure
 * things don't go berserk you have to access this pointer while holding
 * the global lru lock, and make sure that any time you free a node you
 * reset the pointer to NULL. A short sketch of the required access
 * pattern follows.
 */
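
/*
 * A minimal sketch (illustrative only, not used anywhere below) of the
 * access rule described above: node->private may only be inspected under
 * the global LRU lock, and a list reference must be taken before the lock
 * is dropped:
 *
 *	spin_lock(&glob->lru_lock);
 *	bo = (struct ttm_buffer_object *)node->private;
 *	if (bo)
 *		kref_get(&bo->list_kref);
 *	spin_unlock(&glob->lru_lock);
 */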
# include "ttm/ttm_module.h"
# include "ttm/ttm_bo_driver.h"
# include "ttm/ttm_placement.h"
# include <linux/jiffies.h>
# include <linux/slab.h>
# include <linux/sched.h>
# include <linux/mm.h>
# include <linux/file.h>
# include <linux/module.h>
# define TTM_ASSERT_LOCKED(param)
# define TTM_DEBUG(fmt, arg...)
# define TTM_BO_HASH_ORDER 13
static int ttm_bo_setup_vm ( struct ttm_buffer_object * bo ) ;
static int ttm_bo_swapout ( struct ttm_mem_shrink * shrink ) ;
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
					struct ttm_mem_type_manager *man)
{
	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
	printk(KERN_ERR TTM_PFX "    size: %ld\n", (unsigned long)man->size);
	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
		man->available_caching);
	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
		man->default_caching);
	spin_lock(&glob->lru_lock);
	drm_mm_debug_table(&man->manager, TTM_PFX);
	spin_unlock(&glob->lru_lock);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_type_manager *man;
	int i, ret;
	uint32_t mem_type;

	printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
		bo, bo->mem.num_pages, bo->mem.size >> 10,
		bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		man = &bdev->man[mem_type];
		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
			i, placement->placement[i], mem_type);
		ttm_mem_type_manager_debug(glob, man);
	}
}
static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return ret;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
			(sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
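
/*
 * Illustrative sketch only (not part of this file's logic): a typical
 * driver-side pairing of the reservation calls above, e.g. around a
 * placement change. "my_placement" stands in for a hypothetical,
 * driver-filled struct ttm_placement.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_bo_validate(bo, &my_placement, true, false);
 *	ttm_bo_unreserve(bo);
 *	return ret;
 */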
/*
 * Call bo->mutex locked.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through: device buffers get a ttm like kernel ones */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}

	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			bo->mem.mm_node->private = NULL;
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&glob->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&glob->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			spin_unlock(&glob->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);

			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return ret;
}
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		spin_lock(&glob->lru_lock);
		if (evict_mem.mm_node) {
			evict_mem.mm_node->private = NULL;
			drm_mm_put_block(evict_mem.mm_node);
			evict_mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

	spin_lock(&glob->lru_lock);
	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);
	if (unlikely(ret != 0))
		return ret;
	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	ret = ttm_bo_evict(bo, interruptible, no_wait);
	ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
				struct ttm_mem_type_manager *man,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem,
				struct drm_mm_node **node)
{
	struct ttm_bo_global *glob = bo->glob;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	*node = NULL;
	do {
		ret = drm_mm_pre_get(&man->manager);
		if (unlikely(ret))
			return ret;

		spin_lock(&glob->lru_lock);
		*node = drm_mm_search_free_in_range(&man->manager,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(*node == NULL)) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
							mem->page_alignment,
							placement->fpfn,
							lpfn);
		spin_unlock(&glob->lru_lock);
	} while (*node == NULL);
	return 0;
}
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_mm_node *node;
	int ret;

	do {
		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
		if (unlikely(ret != 0))
			return ret;
		if (node)
			break;
		spin_lock(&glob->lru_lock);
		if (list_empty(&man->lru)) {
			spin_unlock(&glob->lru_lock);
			break;
		}
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
						no_wait);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (node == NULL)
		return -ENOMEM;
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}
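
/*
 * Worked example (illustrative only): if the buffer currently sits in
 * cached memory (cur_placement contains TTM_PL_FLAG_CACHED) and the
 * proposed placement allows TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC, the
 * first branch hits and the cached flag is kept:
 *
 *	caching = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC;
 *	result  = (proposed_placement & ~TTM_PL_MASK_CACHING) |
 *		  TTM_PL_FLAG_CACHED;
 *
 * Only when the current caching mode is not allowed do we fall back to
 * the manager default, and then to the fixed preference order above.
 */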
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	struct drm_mm_node *node = NULL;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = ttm_bo_man_get_node(bo, man, placement, mem,
							&node);
			if (unlikely(ret))
				return ret;
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		if (node)
			node->private = bo;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			mem->mm_node->private = bo;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	return wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->cpu_writers) == 0);
}
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		mem.mm_node->private = NULL;
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}
int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
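
/*
 * A minimal usage sketch (illustrative only): a caller holding the
 * reservation fills a struct ttm_placement and asks for the buffer to be
 * placed in any of the listed domains. The flag combination and the
 * "domains" array are hypothetical driver choices, not taken from this
 * file.
 *
 *	uint32_t domains[] = { TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *			       TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED };
 *	struct ttm_placement placement = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.num_placement = 2,
 *		.placement = domains,
 *		.num_busy_placement = 2,
 *		.busy_placement = domains,
 *	};
 *
 *	ret = ttm_bo_validate(bo, &placement, true, false);
 */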
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	int i;

	if (placement->fpfn || placement->lpfn) {
		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
			printk(KERN_ERR TTM_PFX "Page number range too small "
				"Need %lu pages, range is [%u, %u]\n",
				bo->mem.num_pages, placement->fpfn,
				placement->lpfn);
			return -EINVAL;
		}
	}
	for (i = 0; i < placement->num_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	for (i = 0; i < placement->num_busy_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistant_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
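
/*
 * Illustrative sketch only: drivers typically embed a ttm_buffer_object
 * in their own bo type and hand ttm_bo_init a destroy callback that frees
 * the containing structure. "my_bo" and "my_bo_destroy" are hypothetical
 * names, not part of TTM.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object base;
 *		... driver-private fields ...
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, base));
 *	}
 *
 *	ret = ttm_bo_init(bdev, &mybo->base, size, ttm_bo_type_device,
 *			  &placement, 0, 0, true, NULL, acc_size,
 *			  &my_bo_destroy);
 */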
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}
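
/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * for a 1 MiB buffer, num_pages = 256, so the page array needs
 * 256 * 8 = 2048 bytes, which rounds up to one 4096-byte page; the
 * accounted size is then glob->ttm_bo_size + 2 * 4096 bytes.
 */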
int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			unsigned long buffer_start,
			bool interruptible,
			struct file *persistant_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	int ret;
	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
				buffer_start, interruptible,
				persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printk(KERN_ERR TTM_PFX
					"Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		spin_lock(&glob->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);
int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, 0, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct ttm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct ttm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_no_shrink;
	}

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	atomic_set(&glob->bo_count, 0);

	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;

	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}
int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
		      struct ttm_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}
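
/*
 * Illustrative note: a caller maps the region at the physical address
 * bus_base + bus_offset. With hypothetical numbers, io_offset = 0xd0000000
 * and a node starting at page 16 with 4 KiB pages gives a CPU-visible
 * window starting at 0xd0000000 + (16 << 12) = 0xd0010000 and spanning
 * num_pages << 12 bytes.
 */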
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}
/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		} else {
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return ret;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}