/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016-2018 Intel Corporation
 */

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"
#include "i915_timeline.h"

#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
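
/*
 * A HWSP page is shared between many timelines: each bit of free_bitmap
 * corresponds to one CACHELINE_BYTES slot within the page. Pages that
 * still have free slots are kept on gt->hwsp_free_list.
 */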
struct i915_timeline_hwsp {
	struct i915_gt_timelines *gt;
	struct list_head free_link;
	struct i915_vma *vma;
	u64 free_bitmap;
};
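
/*
 * One seqno slot within a HWSP page. The low CACHELINE_BITS of vaddr
 * store the cacheline index (see page_pack_bits), and the CACHELINE_FREE
 * bit marks a slot whose release is deferred until its i915_active
 * tracker idles.
 */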
struct i915_timeline_cacheline {
	struct i915_active active;
	struct i915_timeline_hwsp *hwsp;
	void *vaddr;
#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS
};

static inline struct drm_i915_private *
hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
{
	return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
}
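
/* Allocate an internal page for use as a HWSP and return its GGTT vma. */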
static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}
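
/*
 * Claim a free cacheline for @timeline, reusing a partially used HWSP
 * page from the freelist where possible and allocating a fresh page
 * otherwise. The chosen slot is returned via @cacheline.
 */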
static struct i915_vma *
hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
{
	struct drm_i915_private *i915 = timeline->i915;
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline_hwsp *hwsp;

	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

	spin_lock_irq(&gt->hwsp_lock);

	/* hwsp_free_list only contains HWSP that have available cachelines */
	hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
					typeof(*hwsp), free_link);
	if (!hwsp) {
		struct i915_vma *vma;

		spin_unlock_irq(&gt->hwsp_lock);

		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
		if (!hwsp)
			return ERR_PTR(-ENOMEM);

		vma = __hwsp_alloc(i915);
		if (IS_ERR(vma)) {
			kfree(hwsp);
			return vma;
		}

		vma->private = hwsp;
		hwsp->vma = vma;
		hwsp->free_bitmap = ~0ull;
		hwsp->gt = gt;

		spin_lock_irq(&gt->hwsp_lock);
		list_add(&hwsp->free_link, &gt->hwsp_free_list);
	}

	GEM_BUG_ON(!hwsp->free_bitmap);
	*cacheline = __ffs64(hwsp->free_bitmap);
	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
	if (!hwsp->free_bitmap)
		list_del(&hwsp->free_link);

	spin_unlock_irq(&gt->hwsp_lock);

	GEM_BUG_ON(hwsp->vma->private != hwsp);
	return hwsp->vma;
}
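
/* Return a cacheline to its HWSP; free the page once every slot is unused. */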
static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
	struct i915_gt_timelines *gt = hwsp->gt;
	unsigned long flags;

	spin_lock_irqsave(&gt->hwsp_lock, flags);

	/* As a cacheline becomes available, publish the HWSP on the freelist */
	if (!hwsp->free_bitmap)
		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
	hwsp->free_bitmap |= BIT_ULL(cacheline);

	/* And if no one is left using it, give the page back to the system */
	if (hwsp->free_bitmap == ~0ull) {
		i915_vma_put(hwsp->vma);
		list_del(&hwsp->free_link);
		kfree(hwsp);
	}

	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
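
/*
 * Final teardown of a cacheline: drop the CPU mapping and vma reference,
 * hand the slot back to its HWSP and free the tracking structure.
 */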
static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
{
	GEM_BUG_ON(!i915_active_is_idle(&cl->active));

	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
	i915_vma_put(cl->hwsp->vma);
	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

	i915_active_fini(&cl->active);
	kfree(cl);
}
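
/*
 * Retirement callback for the cacheline's i915_active: unpin the HWSP vma
 * and, if the cacheline was already marked CACHELINE_FREE, complete the
 * deferred free.
 */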
static void __cacheline_retire(struct i915_active *active)
{
	struct i915_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	i915_vma_unpin(cl->hwsp->vma);
	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
		__idle_cacheline_free(cl);
}
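
/*
 * Wrap a HWSP slot in a new cacheline tracker, pinning a CPU mapping of
 * the page and taking a reference on its vma for the cacheline's lifetime.
 */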
static struct i915_timeline_cacheline *
cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
{
	struct i915_timeline_cacheline *cl;
	void *vaddr;

	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		kfree(cl);
		return ERR_CAST(vaddr);
	}

	i915_vma_get(hwsp->vma);
	cl->hwsp = hwsp;
	cl->vaddr = page_pack_bits(vaddr, cacheline);

	i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);

	return cl;
}
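
/*
 * cacheline_acquire/release bracket use of the cacheline by a pinned
 * timeline: the vma is pinned on the first acquire of the active tracker
 * and unpinned again from __cacheline_retire once the tracker idles.
 */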
static void cacheline_acquire(struct i915_timeline_cacheline *cl)
{
	if (cl && i915_active_acquire(&cl->active))
		__i915_vma_pin(cl->hwsp->vma);
}

static void cacheline_release(struct i915_timeline_cacheline *cl)
{
	if (cl)
		i915_active_release(&cl->active);
}

static void cacheline_free(struct i915_timeline_cacheline *cl)
{
	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	if (i915_active_is_idle(&cl->active))
		__idle_cacheline_free(cl);
}

int i915_timeline_init(struct drm_i915_private *i915,
		       struct i915_timeline *timeline,
		       struct i915_vma *hwsp)
{
	void *vaddr;

	/*
	 * Ideally we want a set of engines on a single leaf as we expect
	 * to mostly be tracking synchronisation between engines. It is not
	 * a huge issue if this is not the case, but we may want to mitigate
	 * any page crossing penalties if they become an issue.
	 *
	 * Called during early_init before we know how many engines there are.
	 */
	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);

	timeline->i915 = i915;
	timeline->pin_count = 0;
	timeline->has_initial_breadcrumb = !hwsp;
	timeline->hwsp_cacheline = NULL;

	if (!hwsp) {
		struct i915_timeline_cacheline *cl;
		unsigned int cacheline;

		hwsp = hwsp_alloc(timeline, &cacheline);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);

		cl = cacheline_alloc(hwsp->private, cacheline);
		if (IS_ERR(cl)) {
			__idle_hwsp_free(hwsp->private, cacheline);
			return PTR_ERR(cl);
		}

		timeline->hwsp_cacheline = cl;
		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

		vaddr = page_mask_bits(cl->vaddr);
	} else {
		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;

		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
	}

	timeline->hwsp_seqno =
		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

	timeline->hwsp_ggtt = i915_vma_get(hwsp);
	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_REQUEST(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);

	return 0;
}

void i915_timelines_init(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;

	mutex_init(&gt->mutex);
	INIT_LIST_HEAD(&gt->active_list);

	spin_lock_init(&gt->hwsp_lock);
	INIT_LIST_HEAD(&gt->hwsp_free_list);

	/* via i915_gem_wait_for_idle() */
	i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
}
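
/*
 * Pinned timelines are kept on gt->active_list (under gt->mutex) so that
 * i915_timelines_park() can walk them when the GPU idles.
 */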
static void timeline_add_to_active(struct i915_timeline *tl)
{
	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;

	mutex_lock(&gt->mutex);
	list_add(&tl->link, &gt->active_list);
	mutex_unlock(&gt->mutex);
}

static void timeline_remove_from_active(struct i915_timeline *tl)
{
	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;

	mutex_lock(&gt->mutex);
	list_del(&tl->link);
	mutex_unlock(&gt->mutex);
}

/**
 * i915_timelines_park - called when the driver idles
 * @i915: the drm_i915_private device
 *
 * When the driver is completely idle, we know that all of our sync points
 * have been signaled and our tracking is then entirely redundant. Any request
 * to wait upon an older sync point will be completed instantly as we know
 * the fence is signaled and therefore we will not even look them up in the
 * sync point map.
 */
void i915_timelines_park(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *timeline;

	mutex_lock(&gt->mutex);
	list_for_each_entry(timeline, &gt->active_list, link) {
		/*
		 * All known fences are completed so we can scrap
		 * the current sync point tracking and start afresh,
		 * any attempt to wait upon a previous sync point
		 * will be skipped as the fence was signaled.
		 */
		i915_syncmap_free(&timeline->sync);
	}
	mutex_unlock(&gt->mutex);
}

void i915_timeline_fini(struct i915_timeline *timeline)
{
	GEM_BUG_ON(timeline->pin_count);
	GEM_BUG_ON(!list_empty(&timeline->requests));

	i915_syncmap_free(&timeline->sync);

	if (timeline->hwsp_cacheline)
		cacheline_free(timeline->hwsp_cacheline);
	else
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
}

struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
		     struct i915_vma *global_hwsp)
{
	struct i915_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = i915_timeline_init(i915, timeline, global_hwsp);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	kref_init(&timeline->kref);

	return timeline;
}

int i915_timeline_pin(struct i915_timeline *tl)
{
	int err;

	if (tl->pin_count++)
		return 0;
	GEM_BUG_ON(!tl->pin_count);

	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto unpin;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	timeline_add_to_active(tl);

	return 0;

unpin:
	tl->pin_count = 0;
	return err;
}
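
/*
 * Each request consumes one seqno, plus a second one when the timeline
 * also emits an initial breadcrumb.
 */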
static u32 timeline_advance(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void timeline_rollback(struct i915_timeline *tl)
{
	tl->seqno -= 1 + tl->has_initial_breadcrumb;
}

static noinline int
__i915_timeline_get_seqno(struct i915_timeline *tl,
			  struct i915_request *rq,
			  u32 *seqno)
{
	struct i915_timeline_cacheline *cl;
	unsigned int cacheline;
	struct i915_vma *vma;
	void *vaddr;
	int err;

	/*
	 * If there is an outstanding GPU reference to this cacheline,
	 * such as it being sampled by a HW semaphore on another timeline,
	 * we cannot wraparound our seqno value (the HW semaphore does
	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
	 * So if the cacheline is still busy, we must detach ourselves
	 * from it and leave it inflight alongside its users.
	 *
	 * However, if nobody is watching and we can guarantee that nobody
	 * will, we could simply reuse the same cacheline.
	 *
	 * if (i915_active_request_is_signaled(&tl->last_request) &&
	 *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
	 *	return 0;
	 *
	 * That seems unlikely for a busy timeline that needed to wrap in
	 * the first place, so just replace the cacheline.
	 */

	vma = hwsp_alloc(tl, &cacheline);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_rollback;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err) {
		__idle_hwsp_free(vma->private, cacheline);
		goto err_rollback;
	}

	cl = cacheline_alloc(vma->private, cacheline);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		__idle_hwsp_free(vma->private, cacheline);
		goto err_unpin;
	}
	GEM_BUG_ON(cl->hwsp->vma != vma);

	/*
	 * Attach the old cacheline to the current request, so that we only
	 * free it after the current request is retired, which ensures that
	 * all writes into the cacheline from previous requests are complete.
	 */
	err = i915_active_ref(&tl->hwsp_cacheline->active,
			      tl->fence_context, rq);
	if (err)
		goto err_cacheline;

	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
	cacheline_free(tl->hwsp_cacheline);

	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
	i915_vma_put(tl->hwsp_ggtt);

	tl->hwsp_ggtt = i915_vma_get(vma);

	vaddr = page_mask_bits(cl->vaddr);
	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
	tl->hwsp_seqno =
		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);

	cacheline_acquire(cl);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;

err_cacheline:
	cacheline_free(cl);
err_unpin:
	i915_vma_unpin(vma);
err_rollback:
	timeline_rollback(tl);
	return err;
}
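
/*
 * Reserve the next seqno for @rq. If the seqno wraps around on a timeline
 * backed by a shared HWSP cacheline, the cacheline is swapped for a fresh
 * one (see above) so that HW semaphore waiters never see the counter go
 * backwards.
 */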
int i915_timeline_get_seqno(struct i915_timeline *tl,
			    struct i915_request *rq,
			    u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->hwsp_cacheline))
		return __i915_timeline_get_seqno(tl, rq, seqno);

	return 0;
}

static int cacheline_ref(struct i915_timeline_cacheline *cl,
			 struct i915_request *rq)
{
	return i915_active_ref(&cl->active, rq->fence.context, rq);
}
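
/*
 * Report the GGTT address at which @to can sample @from's breadcrumb,
 * taking a reference on @from's cacheline so it cannot be recycled while
 * @to is still in flight.
 */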
int i915_timeline_read_hwsp(struct i915_request *from,
			    struct i915_request *to,
			    u32 *hwsp)
{
	struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
	struct i915_timeline *tl = from->timeline;
	int err;

	GEM_BUG_ON(to->timeline == tl);

	mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
	err = i915_request_completed(from);
	if (!err)
		err = cacheline_ref(cl, to);
	if (!err) {
		if (likely(cl == tl->hwsp_cacheline)) {
			*hwsp = tl->hwsp_offset;
		} else { /* across a seqno wrap, recover the original offset */
			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
				ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
				CACHELINE_BYTES;
		}
	}
	mutex_unlock(&tl->mutex);

	return err;
}

void i915_timeline_unpin(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	if (--tl->pin_count)
		return;

	timeline_remove_from_active(tl);
	cacheline_release(tl->hwsp_cacheline);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);

	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __i915_timeline_free(struct kref *kref)
{
	struct i915_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	i915_timeline_fini(timeline);
	kfree(timeline);
}

void i915_timelines_fini(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;

	GEM_BUG_ON(!list_empty(&gt->active_list));
	GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));

	mutex_destroy(&gt->mutex);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_timeline.c"
#include "selftests/i915_timeline.c"
#endif