/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"

#include "igt_flush_test.h"
#include "igt_reset.h"
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"

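/*
 * Read back every RING_FORCE_TO_NONPRIV slot by emitting SRM commands on
 * the target engine, storing each register value into a freshly allocated
 * scratch object so the whitelist can be inspected from the CPU.
 */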
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	const u32 base = engine->mmio_base;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_level(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	intel_runtime_pm_get(engine->i915);
	rq = i915_request_alloc(engine, ctx);
	intel_runtime_pm_put(engine->i915);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_gem_object_get(result);
	i915_gem_object_set_active_reference(result);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

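/*
 * Compare the values read back by read_nonprivs() against the whitelist
 * the engine was programmed with; unused slots are expected to point at
 * RING_NOPID.
 */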
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct igt_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	if (i915_terminally_wedged(&ctx->i915->gpu_error))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
	i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return i915_reset_engine(engine, "live_workarounds");
}

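/*
 * Submit a request on a throwaway kernel context so that the engine is busy
 * with something other than the context under test when it is reset;
 * optionally keep it spinning so an engine reset catches it in flight.
 */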
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	intel_runtime_pm_get(engine->i915);

	if (spin)
		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
	else
		rq = i915_request_alloc(engine, ctx);

	intel_runtime_pm_put(engine->i915);

	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

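/*
 * Verify that the RING_NONPRIV whitelist survives the given reset: check it
 * before the reset, switch to a scratch context (spinning for engine resets
 * so the reset hits a busy engine), reset, then check the whitelist again in
 * both the original context and a freshly created one.
 */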
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	bool want_spin = reset == do_engine_reset;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	int err;

	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, name);

	if (want_spin) {
		err = igt_spinner_init(&spin, i915);
		if (err)
			return err;
	}

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out;
	}

	err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
	if (err)
		goto out;

	intel_runtime_pm_get(i915);
	err = reset(engine);
	intel_runtime_pm_put(i915);

	if (want_spin) {
		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);
	}

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out;
	}

	kernel_context_close(ctx);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out;
	}

out:
	kernel_context_close(ctx);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(i915);

	if (intel_has_reset_engine(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(i915);
	return err;
}

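/* Re-read and verify the GT and per-engine workaround lists. */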
static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= intel_gt_verify_workarounds(i915, str);

	for_each_engine(engine, i915, id)
		ok &= intel_engine_verify_workarounds(engine, str);

	return ok;
}

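/*
 * Check that the GT and engine workarounds are still applied after a full
 * GPU reset.
 */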
static int
live_gpu_reset_gt_engine_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gpu_error *error = &i915->gpu_error;
	bool ok;

	if (!intel_has_gpu_reset(i915))
		return 0;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(i915);
	intel_runtime_pm_get(i915);

	ok = verify_gt_engine_wa(i915, "before reset");
	if (!ok)
		goto out;

	set_bit(I915_RESET_HANDOFF, &error->flags);
	i915_reset(i915, ALL_ENGINES, "live_workarounds");

	ok = verify_gt_engine_wa(i915, "after reset");

out:
	intel_runtime_pm_put(i915);
	igt_global_reset_unlock(i915);

	return ok ? 0 : -ESRCH;
}

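/*
 * Check that the workarounds survive a per-engine reset, both when the
 * engine is idle and when it is busy running a spinner.
 */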
static int
live_engine_reset_gt_engine_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	enum intel_engine_id id;
	struct i915_request *rq;
	int ret = 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(i915);
	intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_gt_engine_wa(i915, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		ok = verify_gt_engine_wa(i915, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, i915);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_gt_engine_wa(i915, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}

err:
	intel_runtime_pm_put(i915);
	igt_global_reset_unlock(i915);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}

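/*
 * Entry point: run the workaround/whitelist live selftests under
 * struct_mutex, skipping entirely if the GPU is already terminally wedged.
 */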
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_gpu_reset_gt_engine_workarounds),
		SUBTEST(live_engine_reset_gt_engine_workarounds),
	};
	int err;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}