/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Unlock, in reverse order, every buffer on @list that was reserved
 * before @entry.
 */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
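
/*
 * A minimal usage sketch, illustrative only and not part of this file:
 * driver_validate() and "fence" stand in for the driver's own validation
 * step and the dma_fence it produces after submitting work.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(list);
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	(fill "list" with struct ttm_validate_buffer entries)
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *	if (ret)
 *		return ret;
 *
 *	ret = driver_validate(&list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */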

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);