/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"

#include "../selftests/i915_random.h"
#include "../i915_selftest.h"

#include "../selftests/igt_flush_test.h"
#include "../selftests/mock_gem_device.h"
#include "selftests/mock_timeline.h"
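
/*
 * Helpers to locate a timeline's HWSP (hardware status page) seqno slot:
 * hwsp_page() returns the page backing the pinned HWSP object, and
 * hwsp_cacheline() maps the slot to a cacheline index in the kernel mapping
 * so the mock test below can detect two timelines sharing a cacheline.
 */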
static struct page *hwsp_page(struct intel_timeline *tl)
{
	struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
	unsigned long address = (unsigned long)page_address(hwsp_page(tl));

	return (address + tl->hwsp_offset) / CACHELINE_BYTES;
}

#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
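
/*
 * State for the mock HWSP freelist test: the radix tree maps each allocated
 * HWSP cacheline back to its timeline (so a duplicate allocation is caught
 * on insert), while the history[] ring keeps recently created timelines
 * alive so their cachelines remain claimed until explicitly recycled.
 */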
struct mock_hwsp_freelist {
	struct intel_gt *gt;
	struct radix_tree_root cachelines;
	struct intel_timeline **history;
	unsigned long count, max;
	struct rnd_state prng;
};

enum {
	SHUFFLE = BIT(0),
};

static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
			       unsigned int idx,
			       struct intel_timeline *tl)
{
	tl = xchg(&state->history[idx], tl);
	if (tl) {
		radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
		intel_timeline_put(tl);
	}
}

static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
				unsigned int count,
				unsigned int flags)
{
	struct intel_timeline *tl;
	unsigned int idx;

	while (count--) {
		unsigned long cacheline;
		int err;

		tl = intel_timeline_create(state->gt, NULL);
		if (IS_ERR(tl))
			return PTR_ERR(tl);

		cacheline = hwsp_cacheline(tl);
		err = radix_tree_insert(&state->cachelines, cacheline, tl);
		if (err) {
			if (err == -EEXIST) {
				pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
				       cacheline);
			}
			intel_timeline_put(tl);
			return err;
		}

		idx = state->count++ % state->max;
		__mock_hwsp_record(state, idx, tl);
	}

	if (flags & SHUFFLE)
		i915_prandom_shuffle(state->history,
				     sizeof(*state->history),
				     min(state->count, state->max),
				     &state->prng);

	count = i915_prandom_u32_max_state(min(state->count, state->max),
					   &state->prng);
	while (count--) {
		idx = --state->count % state->max;
		__mock_hwsp_record(state, idx, NULL);
	}

	return 0;
}

static int mock_hwsp_freelist(void *arg)
{
	struct mock_hwsp_freelist state;
	struct drm_i915_private *i915;
	const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "linear", 0 },
		{ "shuffled", SHUFFLE },
		{ },
	}, *p;
	unsigned int na;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
	state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);

	state.gt = &i915->gt;

	/*
	 * Create a bunch of timelines and check that their HWSP do not overlap.
	 * Free some, and try again.
	 */

	state.max = PAGE_SIZE / sizeof(*state.history);
	state.count = 0;
	state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
	if (!state.history) {
		err = -ENOMEM;
		goto err_put;
	}

	for (p = phases; p->name; p++) {
		pr_debug("%s(%s)\n", __func__, p->name);
		for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
			err = __mock_hwsp_timeline(&state, na, p->flags);
			if (err)
				goto out;
		}
	}

out:
	for (na = 0; na < state.max; na++)
		__mock_hwsp_record(&state, na, NULL);
	kfree(state.history);
err_put:
	drm_dev_put(&i915->drm);
	return err;
}
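
/*
 * __igt_sync() drives a single step of the table in igt_sync(): it queries
 * the timeline's sync map for whether (ctx, seqno) is already considered
 * "later", compares against the expected answer, and optionally records the
 * seqno so that subsequent steps observe it.
 */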
struct __igt_sync {
	const char *name;
	u32 seqno;
	bool expected;
	bool set;
};

static int __igt_sync(struct intel_timeline *tl,
		      u64 ctx,
		      const struct __igt_sync *p,
		      const char *name)
{
	int ret;

	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
		       name, p->name, ctx, p->seqno, yesno(p->expected));
		return -EINVAL;
	}

	if (p->set) {
		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
		if (ret)
			return ret;
	}

	return 0;
}

static int igt_sync(void *arg)
{
	const struct __igt_sync pass[] = {
		{ "unset", 0, false, false },
		{ "new", 0, false, true },
		{ "0a", 0, true, true },
		{ "1a", 1, false, true },
		{ "1b", 1, true, true },
		{ "0b", 0, true, false },
		{ "2a", 2, false, true },
		{ "4", 4, false, true },
		{ "INT_MAX", INT_MAX, false, true },
		{ "INT_MAX-1", INT_MAX - 1, true, false },
		{ "INT_MAX+1", (u32)INT_MAX + 1, false, true },
		{ "INT_MAX", INT_MAX, true, false },
		{ "UINT_MAX", UINT_MAX, false, true },
		{ "wrap", 0, false, true },
		{ "unwrap", UINT_MAX, true, false },
		{},
	}, *p;
	struct intel_timeline tl;
	int order, offset;
	int ret = -ENODEV;

	mock_timeline_init(&tl, 0);
	for (p = pass; p->name; p++) {
		for (order = 1; order < 64; order++) {
			for (offset = -1; offset <= (order > 1); offset++) {
				u64 ctx = BIT_ULL(order) + offset;

				ret = __igt_sync(&tl, ctx, p, "1");
				if (ret)
					goto out;
			}
		}
	}
	mock_timeline_fini(&tl);

	mock_timeline_init(&tl, 0);
	for (order = 1; order < 64; order++) {
		for (offset = -1; offset <= (order > 1); offset++) {
			u64 ctx = BIT_ULL(order) + offset;

			for (p = pass; p->name; p++) {
				ret = __igt_sync(&tl, ctx, p, "2");
				if (ret)
					goto out;
			}
		}
	}

out:
	mock_timeline_fini(&tl);
	return ret;
}

static unsigned int random_engine(struct rnd_state *rnd)
{
	return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}
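
/*
 * bench_sync() times the sync-map operations exercised above: random and
 * in-order insertions, lookups of the same ids, and mixed lookup/insert
 * patterns, subtracting the measured prng overhead from the phases whose
 * ids are randomly generated.
 */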
static int bench_sync(void *arg)
{
	struct rnd_state prng;
	struct intel_timeline tl;
	unsigned long end_time, count;
	u64 prng32_1M;
	ktime_t kt;
	int order, last_order;

	mock_timeline_init(&tl, 0);

	/* Lookups from cache are very fast and so the random number generation
	 * and the loop itself becomes a significant factor in the per-iteration
	 * timings. We try to compensate the results by measuring the overhead
	 * of the prng and subtract it from the reported results.
	 */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ / 10;
	do {
		u32 x;

		/* Make sure the compiler doesn't optimise away the prng call */
		WRITE_ONCE(x, prandom_u32_state(&prng));

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_debug("%s: %lu random evaluations, %lluns/prng\n",
		 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);

	/* Benchmark (only) setting random context ids */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ / 10;
	do {
		u64 id = i915_prandom_u64_state(&prng);

		__intel_timeline_sync_set(&tl, id, 0);
		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		u64 id = i915_prandom_u64_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
			mock_timeline_fini(&tl);
			pr_err("Lookup of %llu failed\n", id);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();

	mock_timeline_init(&tl, 0);

	/* Benchmark setting the first N (in order) contexts */
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ / 10;
	do {
		__intel_timeline_sync_set(&tl, count++, 0);
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
			pr_err("Lookup of %lu failed\n", end_time);
			mock_timeline_fini(&tl);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();

	mock_timeline_init(&tl, 0);

	/* Benchmark searching for a random context id and maybe changing it */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ / 10;
	do {
		u32 id = random_engine(&prng);
		u32 seqno = prandom_u32_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, seqno))
			__intel_timeline_sync_set(&tl, id, seqno);

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	mock_timeline_fini(&tl);
	cond_resched();

	/* Benchmark searching for a known context id and changing the seqno */
	for (last_order = 1, order = 1; order < 32;
	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
		unsigned int mask = BIT(order) - 1;

		mock_timeline_init(&tl, 0);

		count = 0;
		kt = ktime_get();
		end_time = jiffies + HZ / 10;
		do {
			/* Without assuming too many details of the underlying
			 * implementation, try to identify its phase-changes
			 * (if any)!
			 */
			u64 id = (u64)(count & mask) << order;

			__intel_timeline_sync_is_later(&tl, id, 0);
			__intel_timeline_sync_set(&tl, id, 0);

			count++;
		} while (!time_after(jiffies, end_time));
		kt = ktime_sub(ktime_get(), kt);
		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
			__func__, count, order,
			(long long)div64_ul(ktime_to_ns(kt), count));
		mock_timeline_fini(&tl);
		cond_resched();
	}

	return 0;
}

int intel_timeline_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_hwsp_freelist),
		SUBTEST(igt_sync),
		SUBTEST(bench_sync),
	};

	return i915_subtests(tests, NULL);
}
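
/*
 * Emit a command-stream write of a single dword @value to the GGTT address
 * @addr, using the MI_STORE_DWORD_IMM layout appropriate to the hardware
 * generation of the request's engine.
 */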
static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (INTEL_GEN(rq->engine->i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = 0;
		*cs++ = value;
	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = addr;
		*cs++ = value;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = addr;
		*cs++ = value;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
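
/*
 * Submit a request on @engine whose payload stores @value into @tl's HWSP
 * slot from the GPU. The caller receives a reference to the request (or an
 * ERR_PTR on failure) and is responsible for dropping it.
 */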
static struct i915_request *
tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
	struct i915_request *rq;
	int err;

	err = intel_timeline_pin(tl);
	if (err) {
		rq = ERR_PTR(err);
		goto out;
	}

	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq))
		goto out_unpin;

	i915_request_get(rq);

	err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		rq = ERR_PTR(err);
	}

out_unpin:
	intel_timeline_unpin(tl);
out:
	if (IS_ERR(rq))
		pr_err("Failed to write to timeline!\n");
	return rq;
}
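
/*
 * Create a timeline and sanity check that the seqno already present in its
 * HWSP slot matches the timeline's current seqno (tl->seqno).
 */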
static struct intel_timeline *
checked_intel_timeline_create(struct intel_gt *gt)
{
	struct intel_timeline *tl;

	tl = intel_timeline_create(gt, NULL);
	if (IS_ERR(tl))
		return tl;

	if (*tl->hwsp_seqno != tl->seqno) {
		pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
		       *tl->hwsp_seqno, tl->seqno);
		intel_timeline_put(tl);
		return ERR_PTR(-EINVAL);
	}

	return tl;
}

static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
	struct intel_gt *gt = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots.
	 */

	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	count = 0;
	for_each_engine(engine, gt, id) {
		if (!intel_engine_can_store_dword(engine))
			continue;

		intel_engine_pm_get(engine);

		for (n = 0; n < NUM_TIMELINES; n++) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				break;
			}

			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				break;
			}

			timelines[count++] = tl;
			i915_request_put(rq);
		}

		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	kvfree(timelines);
	return err;
#undef NUM_TIMELINES
}

static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
	struct intel_gt *gt = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots with adjacent
	 * engines.
	 */

	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	count = 0;
	for (n = 0; n < NUM_TIMELINES; n++) {
		for_each_engine(engine, gt, id) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			if (!intel_engine_can_store_dword(engine))
				continue;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				goto out;
			}

			intel_engine_pm_get(engine);
			rq = tl_write(tl, engine, count);
			intel_engine_pm_put(engine);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				goto out;
			}

			timelines[count++] = tl;
			i915_request_put(rq);
		}
	}

out:
	if (igt_flush_test(gt->i915))
		err = -EIO;

	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	kvfree(timelines);
	return err;
#undef NUM_TIMELINES
}

static int live_hwsp_wrap(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct intel_timeline *tl;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Across a seqno wrap, we need to keep the old cacheline alive for
	 * foreign GPU references.
	 */

	tl = intel_timeline_create(gt, NULL);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
		goto out_free;

	err = intel_timeline_pin(tl);
	if (err)
		goto out_free;

	for_each_engine(engine, gt, id) {
		const u32 *hwsp_seqno[2];
		struct i915_request *rq;
		u32 seqno[2];

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out;
		}

		tl->seqno = -4u;

		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
		mutex_unlock(&tl->mutex);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
			 seqno[0], tl->hwsp_offset);

		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[0] = tl->hwsp_seqno;

		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
		mutex_unlock(&tl->mutex);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
			 seqno[1], tl->hwsp_offset);

		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[1] = tl->hwsp_seqno;

		/* With wrap should come a new hwsp */
		GEM_BUG_ON(seqno[1] >= seqno[0]);
		GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);

		i915_request_add(rq);
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Wait for timeline writes timed out!\n");
			err = -EIO;
			goto out;
		}

		if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
			pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
			       *hwsp_seqno[0], *hwsp_seqno[1],
			       seqno[0], seqno[1]);
			err = -EINVAL;
			goto out;
		}

		intel_gt_retire_requests(gt); /* recycle HWSP */
	}

out:
	if (igt_flush_test(gt->i915))
		err = -EIO;

	intel_timeline_unpin(tl);
out_free:
	intel_timeline_put(tl);
	return err;
}
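
/*
 * The rollover tests below manipulate a timeline's seqno directly, so park
 * the engine heartbeat (while holding an engine-pm wakeref) to avoid
 * background heartbeat requests interfering with the controlled wrap.
 */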
static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
	engine->props.heartbeat_interval_ms = 0;

	intel_engine_pm_get(engine);
	intel_engine_park_heartbeat(engine);
}

static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
	intel_engine_pm_put(engine);

	engine->props.heartbeat_interval_ms =
		engine->defaults.heartbeat_interval_ms;
}

static int live_hwsp_rollover_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Run the host for long enough, and even the kernel context will
	 * see a seqno rollover.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct intel_timeline *tl = ce->timeline;
		struct i915_request *rq[3] = {};
		int i;

		engine_heartbeat_disable(engine);
		if (intel_gt_wait_for_idle(gt, HZ / 2)) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
		tl->seqno = 0;
		timeline_rollback(tl);
		timeline_rollback(tl);
		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			struct i915_request *this;

			this = i915_request_create(ce);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out;
			}

			pr_debug("%s: create fence.seqno:%d\n",
				 engine->name,
				 lower_32_bits(this->fence.seqno));

			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);

			rq[i] = i915_request_get(this);
			i915_request_add(this);
		}

		/* We expected a wrap! */
		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);

		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
			pr_err("Wait for timeline wrap timed out!\n");
			err = -EIO;
			goto out;
		}

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			if (!i915_request_completed(rq[i])) {
				pr_err("Pre-wrap request not completed!\n");
				err = -EINVAL;
				goto out;
			}
		}

out:
		for (i = 0; i < ARRAY_SIZE(rq); i++)
			i915_request_put(rq[i]);
		engine_heartbeat_enable(engine);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

static int live_hwsp_rollover_user(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Simulate a long running user context, and force the seqno wrap
	 * on the user's timeline.
	 */

	for_each_engine(engine, gt, id) {
		struct i915_request *rq[3] = {};
		struct intel_timeline *tl;
		struct intel_context *ce;
		int i;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = intel_context_alloc_state(ce);
		if (err)
			goto out;

		tl = ce->timeline;
		if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
			goto out;

		timeline_rollback(tl);
		timeline_rollback(tl);
		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			struct i915_request *this;

			this = intel_context_create_request(ce);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out;
			}

			pr_debug("%s: create fence.seqno:%d\n",
				 engine->name,
				 lower_32_bits(this->fence.seqno));

			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);

			rq[i] = i915_request_get(this);
			i915_request_add(this);
		}

		/* We expected a wrap! */
		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);

		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
			pr_err("Wait for timeline wrap timed out!\n");
			err = -EIO;
			goto out;
		}

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			if (!i915_request_completed(rq[i])) {
				pr_err("Pre-wrap request not completed!\n");
				err = -EINVAL;
				goto out;
			}
		}

out:
		for (i = 0; i < ARRAY_SIZE(rq); i++)
			i915_request_put(rq[i]);
		intel_context_put(ce);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

static int live_hwsp_recycle(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	/*
	 * Check seqno writes into one timeline at a time. We expect to
	 * recycle the breadcrumb slot between iterations and neither
	 * want to confuse ourselves or the GPU.
	 */

	count = 0;
	for_each_engine(engine, gt, id) {
		IGT_TIMEOUT(end_time);

		if (!intel_engine_can_store_dword(engine))
			continue;

		intel_engine_pm_get(engine);

		do {
			struct intel_timeline *tl;
			struct i915_request *rq;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				break;
			}

			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				break;
			}

			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Wait for timeline writes timed out!\n");
				i915_request_put(rq);
				intel_timeline_put(tl);
				err = -EIO;
				break;
			}

			if (*tl->hwsp_seqno != count) {
				pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
				       count, *tl->hwsp_seqno);
				err = -EINVAL;
			}

			i915_request_put(rq);
			intel_timeline_put(tl);
			count++;

			if (err)
				break;
		} while (!__igt_timeout(end_time, NULL));

		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	return err;
}

int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_hwsp_recycle),
		SUBTEST(live_hwsp_engine),
		SUBTEST(live_hwsp_alternate),
		SUBTEST(live_hwsp_wrap),
		SUBTEST(live_hwsp_rollover_kernel),
		SUBTEST(live_hwsp_rollover_user),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}