/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
2014-01-09 11:03:08 +01:00
static void ttm_eu_backoff_reservation_reverse ( struct list_head * list ,
struct ttm_validate_buffer * entry )
2010-11-22 13:24:40 +10:00
{
2014-01-09 11:03:08 +01:00
list_for_each_entry_continue_reverse ( entry , list , head ) {
2010-11-22 13:24:40 +10:00
struct ttm_buffer_object * bo = entry - > bo ;
2019-08-05 16:01:12 +02:00
reservation_object_unlock ( bo - > base . resv ) ;
2010-11-22 13:24:40 +10:00
}
}
static void ttm_eu_del_from_lru_locked ( struct list_head * list )
{
struct ttm_validate_buffer * entry ;
list_for_each_entry ( entry , list , head ) {
struct ttm_buffer_object * bo = entry - > bo ;
2016-11-14 17:34:19 +01:00
ttm_bo_del_from_lru ( bo ) ;
2010-11-22 13:24:40 +10:00
}
}
2013-06-27 13:48:17 +02:00
void ttm_eu_backoff_reservation ( struct ww_acquire_ctx * ticket ,
struct list_head * list )
2009-12-06 21:46:26 +01:00
{
struct ttm_validate_buffer * entry ;
2010-11-17 12:28:27 +00:00
struct ttm_bo_global * glob ;
2009-12-06 21:46:26 +01:00
2010-11-17 12:28:27 +00:00
if ( list_empty ( list ) )
return ;
2009-12-06 21:46:26 +01:00
2010-11-17 12:28:27 +00:00
entry = list_first_entry ( list , struct ttm_validate_buffer , head ) ;
2018-02-21 17:26:45 +01:00
glob = entry - > bo - > bdev - > glob ;
2014-01-09 11:03:08 +01:00
2010-11-17 12:28:27 +00:00
spin_lock ( & glob - > lru_lock ) ;
2014-01-09 11:03:08 +01:00
list_for_each_entry ( entry , list , head ) {
struct ttm_buffer_object * bo = entry - > bo ;
2019-05-10 14:15:08 +02:00
if ( list_empty ( & bo - > lru ) )
ttm_bo_add_to_lru ( bo ) ;
2019-08-05 16:01:12 +02:00
reservation_object_unlock ( bo - > base . resv ) ;
2014-01-09 11:03:08 +01:00
}
spin_unlock ( & glob - > lru_lock ) ;
2013-11-15 00:02:54 -08:00
if ( ticket )
ww_acquire_fini ( ticket ) ;
2009-12-06 21:46:26 +01:00
}
EXPORT_SYMBOL ( ttm_eu_backoff_reservation ) ;
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 *
 * @ticket:  ww_acquire context used for deadlock avoidance, or NULL to
 *           trylock each buffer instead.
 * @list:    list of ttm_validate_buffer entries to reserve.
 * @intr:    use interruptible waits; on a signal -ERESTARTSYS is returned.
 * @dups:    if non-NULL, buffers already reserved with @ticket (-EALREADY)
 *           are moved onto this list instead of failing.
 * @del_lru: remove all reserved buffers from the LRU on success.
 *
 * Returns 0 on success or a negative error code. On error all
 * reservations taken so far have been backed off and the ticket ended.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups, bool del_lru)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* Without a ticket this degrades to a per-BO trylock. */
		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			/* BO is held for CPU access; give it back and fail. */
			reservation_object_unlock(bo->base.resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			/*
			 * Already reserved under this ticket (duplicate in
			 * the list). Park it on @dups and keep going; save
			 * the predecessor first so the iterator stays valid
			 * after list_del().
			 */
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			/* Reserved; pre-allocate shared-fence slots if asked. */
			if (!entry->num_shared)
				continue;

			ret = reservation_object_reserve_shared(bo->base.resv,
								entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			/*
			 * ww_mutex wound us: sleep on the contended BO with
			 * everything else released, which is deadlock-free.
			 */
			if (intr) {
				ret = reservation_object_lock_slow_interruptible(bo->base.resv,
										 ticket);
			} else {
				reservation_object_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		/* Slow-path lock succeeded; redo the shared-slot reservation. */
		if (!ret && entry->num_shared)
			ret = reservation_object_reserve_shared(bo->base.resv,
								entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (del_lru) {
		spin_lock(&glob->lru_lock);
		ttm_eu_del_from_lru_locked(list);
		spin_unlock(&glob->lru_lock);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
2013-06-27 13:48:17 +02:00
void ttm_eu_fence_buffer_objects ( struct ww_acquire_ctx * ticket ,
2016-10-25 13:00:45 +01:00
struct list_head * list ,
struct dma_fence * fence )
2009-12-06 21:46:26 +01:00
{
struct ttm_validate_buffer * entry ;
2010-11-17 12:28:30 +00:00
struct ttm_buffer_object * bo ;
struct ttm_bo_global * glob ;
2009-12-06 21:46:26 +01:00
2010-11-17 12:28:30 +00:00
if ( list_empty ( list ) )
return ;
bo = list_first_entry ( list , struct ttm_validate_buffer , head ) - > bo ;
2018-02-21 17:26:45 +01:00
glob = bo - > bdev - > glob ;
2009-12-06 21:46:26 +01:00
2010-11-17 12:28:30 +00:00
spin_lock ( & glob - > lru_lock ) ;
list_for_each_entry ( entry , list , head ) {
bo = entry - > bo ;
2018-09-19 16:25:08 +02:00
if ( entry - > num_shared )
2019-08-05 16:01:12 +02:00
reservation_object_add_shared_fence ( bo - > base . resv , fence ) ;
2014-09-04 20:01:52 +02:00
else
2019-08-05 16:01:12 +02:00
reservation_object_add_excl_fence ( bo - > base . resv , fence ) ;
2019-05-10 14:15:08 +02:00
if ( list_empty ( & bo - > lru ) )
ttm_bo_add_to_lru ( bo ) ;
else
ttm_bo_move_to_lru_tail ( bo , NULL ) ;
2019-08-05 16:01:12 +02:00
reservation_object_unlock ( bo - > base . resv ) ;
2010-11-17 12:28:30 +00:00
}
2012-11-28 12:25:39 +01:00
spin_unlock ( & glob - > lru_lock ) ;
2013-11-15 00:02:54 -08:00
if ( ticket )
ww_acquire_fini ( ticket ) ;
2009-12-06 21:46:26 +01:00
}
EXPORT_SYMBOL ( ttm_eu_fence_buffer_objects ) ;