/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * in five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to default values (on GPU reset, suspend/resume [2]_, etc..).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of a MMIO workaround (as we
 *   write the list of these to/be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */
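
/*
 * For orientation: each workaround below ends up as a struct i915_wa entry
 * (register, bits to clear, bits to set, bits to verify on readback),
 * collected into a struct i915_wa_list that is kept sorted by mmio offset
 * and later walked by the emit/apply/verify helpers further down.
 */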

static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)
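
/*
 * Note: the list storage grows in power-of-two chunks of WA_LIST_CHUNK
 * entries (see _wa_add()) and is trimmed back to its final size once the
 * list is complete (see wa_init_finish()).
 */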

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
			 wal->wa_count, wal->name, wal->engine_name);
}

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list)
			memcpy(list, wal->list, sizeof(*wa) * wal->count);

		wal->list = list;
	}
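
	/*
	 * The list is kept sorted by mmio offset: binary-search for either
	 * the insertion point of a new register or an existing entry for
	 * the same register, whose set/clr/read masks are then merged
	 * instead of adding a duplicate.
	 */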
	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask)
{
	struct i915_wa wa = {
		.reg  = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
	};

	_wa_add(wal, &wa);
}
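
/*
 * Each workaround is a (clear, set, read) triple: "clear" bits are removed,
 * "set" bits are ored in, and "read" masks the bits that wa_verify() later
 * checks on readback. The wrappers below encode the common combinations.
 */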

static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_masked_or(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_masked_or(wal, reg, set, set);
}

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
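
/*
 * The wa_masked_*() helpers above target "masked" registers, where the upper
 * 16 bits of a write select which of the lower 16 bits take effect. As a
 * sketch, wa_masked_en(wal, reg, BIT(0)) queues a write of 0x00010001: mask
 * bit 16 enables the update of value bit 0, and the hardware leaves every
 * other bit untouched.
 */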

#define WA_SET_BIT_MASKED(addr, mask) \
	wa_masked_en(wal, (addr), (mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	wa_masked_dis(wal, (addr), (mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE |
			  GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaDisableBankHangMode:icl */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* Wa_1604370585:icl (pre-prod)
	 * Formerly known as WaPushConstantDereferenceHoldDisable
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  PUSH_CONSTANT_DEREF_DISABLE);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* Wa_2006611047:icl (pre-prod)
	 * Formerly known as WaDisableImprovedTdlClkGating
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);

	/* Wa_2006665173:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);

	/* WaEnableFloatBlendOptimization:icl */
	wa_write_masked_or(wal,
			   GEN10_CACHE_MODE_SS,
			   0, /* write-only, so skip validation */
			   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));

	/* WaDisableGPGPUMidThreadPreemption:icl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}

static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/*
	 * Wa_1409142259:tgl
	 * Wa_1409347922:tgl
	 * Wa_1409252684:tgl
	 * Wa_1409217633:tgl
	 * Wa_1409207793:tgl
	 * Wa_1409178076:tgl
	 * Wa_1408979724:tgl
	 */
	WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
			  GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/*
	 * Wa_1604555607:gen12 and Wa_1608008084:gen12
	 * FF_MODE2 register will return the wrong value when read. The default
	 * value for this register is zero for all fields and there are no bit
	 * masks. So instead of doing a RMW we should just write the TDS timer
	 * value for Wa_1604555607.
	 */
	wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
	       FF_MODE2_TDS_TIMER_128, 0);

	/* WaDisableGPGPUMidThreadPreemption:tgl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class != RENDER_CLASS)
		return;

	wa_init_start(wal, name, engine->name);

	if (IS_GEN(i915, 12))
		tgl_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 11))
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (INTEL_GEN(i915) < 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->set;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
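
/*
 * For reference, the command stream emitted above looks roughly like:
 *
 *	MI_LOAD_REGISTER_IMM(count)
 *	  reg[0] offset, wa[0].set
 *	  reg[1] offset, wa[1].set
 *	  ...
 *	MI_NOOP
 *
 * with the trailing MI_NOOP keeping the packet at the even dword count
 * requested from intel_ring_begin().
 */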

static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaInPlaceDecompressionHang:bxt */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);
}

static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
2018-04-10 09:12:46 -07:00
2019-01-09 17:32:31 -08:00
static void
2019-04-12 21:24:57 +01:00
wa_init_mcr ( struct drm_i915_private * i915 , struct i915_wa_list * wal )
2018-05-18 15:39:57 -07:00
{
2019-04-12 21:24:57 +01:00
const struct sseu_dev_info * sseu = & RUNTIME_INFO ( i915 ) - > sseu ;
2019-07-17 19:06:21 +01:00
unsigned int slice , subslice ;
u32 l3_en , mcr , mcr_mask ;
GEM_BUG_ON ( INTEL_GEN ( i915 ) < 10 ) ;
2018-05-18 15:39:57 -07:00
2018-05-18 15:41:25 -07:00
/*
* WaProgramMgsrForL3BankSpecificMmioReads : cnl , icl
* L3Banks could be fused off in single slice scenario . If that is
* the case , we might need to program MCR select to a valid L3Bank
* by default , to make sure we correctly read certain registers
* later on ( in the range 0xB100 - 0xB3FF ) .
2019-07-17 19:06:21 +01:00
*
2018-05-18 15:40:32 -07:00
* WaProgramMgsrForCorrectSliceSpecificMmioReads : cnl , icl
2018-05-18 15:39:57 -07:00
* Before any MMIO read into slice / subslice specific registers , MCR
* packet control register needs to be programmed to point to any
* enabled s / ss pair . Otherwise , incorrect values will be returned .
* This means each subsequent MMIO read will be forwarded to an
* specific s / ss combination , but this is OK since these registers
* are consistent across s / ss in almost all cases . In the rare
* occasions , such as INSTDONE , where this value is dependent
* on s / ss combo , the read should be done with read_subslice_reg .
2019-07-17 19:06:21 +01:00
*
* Since GEN8_MCR_SELECTOR contains dual - purpose bits which select both
* to which subslice , or to which L3 bank , the respective mmio reads
* will go , we have to find a common index which works for both
* accesses .
*
* Case where we cannot find a common index fortunately should not
* happen in production hardware , so we only emit a warning instead of
* implementing something more complex that requires checking the range
* of every MMIO read .
2018-05-18 15:39:57 -07:00
*/
2019-07-17 19:06:21 +01:00
if ( INTEL_GEN ( i915 ) > = 10 & & is_power_of_2 ( sseu - > slice_mask ) ) {
u32 l3_fuse =
intel_uncore_read ( & i915 - > uncore , GEN10_MIRROR_FUSE3 ) &
GEN10_L3BANK_MASK ;
DRM_DEBUG_DRIVER ( " L3 fuse = %x \n " , l3_fuse ) ;
l3_en = ~ ( l3_fuse < < GEN10_L3BANK_PAIR_COUNT | l3_fuse ) ;
} else {
l3_en = ~ 0 ;
}
slice = fls ( sseu - > slice_mask ) - 1 ;
2019-08-23 09:03:07 -07:00
subslice = fls ( l3_en & intel_sseu_get_subslices ( sseu , slice ) ) ;
2019-07-17 19:06:21 +01:00
if ( ! subslice ) {
DRM_WARN ( " No common index found between subslice mask %x and L3 bank mask %x! \n " ,
2019-08-23 09:03:07 -07:00
intel_sseu_get_subslices ( sseu , slice ) , l3_en ) ;
2019-07-17 19:06:21 +01:00
subslice = fls ( l3_en ) ;
		drm_WARN_ON(&i915->drm, !subslice);
	}
	subslice--;

	if (INTEL_GEN(i915) >= 11) {
		mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
	} else {
		mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
	}

	DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);

	wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}

static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);

	/* WaInPlaceDecompressionHang:cnl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:icl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_masked_or(wal,
			   GEN11_GACB_PERF_CTRL,
			   GEN11_HASH_CTRL_MASK,
			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1405779004:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    MSCUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl */
	wa_write_or(wal,
		    SUBSLICE_UNIT_LEVEL_CLKGATE,
		    GWUNIT_CLKGATE_DIS);

	/* Wa_1406838659:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		wa_write_or(wal,
			    INF_UNIT_LEVEL_CLKGATE,
			    CGPSF_CLKGATE_DIS);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/* Wa_1607087056:icl */
	wa_write_or(wal,
		    SLICE_UNIT_LEVEL_CLKGATE,
		    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}

static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* Wa_1409420604:tgl */
	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
		wa_write_or(wal,
			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
			    CPSSUNIT_CLKGATE_DIS);

	/* Wa_1607087056:tgl also known as BUG:1409180338 */
	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}

static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	if (IS_GEN(i915, 12))
		tgl_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 11))
		icl_gt_workarounds_init(i915, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		bxt_gt_workarounds_init(i915, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915, wal);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT", "global");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
	if ((cur ^ wa->set) & wa->read) {
		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->read, wa->set);

		return false;
	}

	return true;
}
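
/*
 * Note that wa_verify() only compares the bits covered by wa->read; a zero
 * read mask (as used above for registers that cannot be read back reliably)
 * makes the entry effectively unverifiable and it always passes.
 */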

static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (wa->clr)
			intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
		else
			intel_uncore_write_fw(uncore, wa->reg, wa->set);

		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
			wa_verify(wa,
				  intel_uncore_read_fw(uncore, wa->reg),
				  wal->name, "application");
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}

static bool wa_list_verify(struct intel_uncore *uncore,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct i915_wa *wa;
	unsigned int i;
	bool ok = true;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wa,
				intel_uncore_read(uncore, wa->reg),
				wal->name, from);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}

static inline bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}

static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
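
/*
 * A whitelist entry is the register offset with the access/range flags folded
 * into bits the offset itself does not use; intel_engine_apply_whitelist()
 * below writes the combined value verbatim into a RING_FORCE_TO_NONPRIV slot.
 */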

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another:
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	whitelist_reg(w, GEN8_CS_CHICKEN1);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 registers which are next to one another:
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 * Wa_1408556865:tgl
		 *
		 * This covers 4 registers which are next to one another:
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);

		/* Wa_1808121037:tgl */
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

		/* Wa_1806527549:tgl */
		whitelist_reg(w, HIZ_CHICKEN);
		break;

	default:
		break;
	}
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist", engine->name);

	if (IS_GEN(i915, 12))
		tgl_whitelist_build(engine);
	else if (IS_GEN(i915, 11))
		icl_whitelist_build(engine);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(w);
}

void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
		/*
		 * Wa_1607138336:tgl
		 * Wa_1607063988:tgl
		 */
		wa_write_or(wal,
			    GEN9_CTX_PREEMPT_REG,
			    GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);

		/*
		 * Wa_1607030317:tgl
		 * Wa_1607186500:tgl
		 * Wa_1607297627:tgl there are 3 entries for this WA in the
		 * BSpec; 2 of them say it is fixed on B0, the other one says
		 * it is permanent
		 */
		wa_masked_en(wal,
			     GEN6_RC_SLEEP_PSMI_CONTROL,
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);

		/*
		 * Wa_1606679103:tgl
		 * (see also Wa_1606682166:icl)
		 */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1407928979:tgl */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/*
		 * Wa_1409085225:tgl
		 * Wa_14010229206:tgl
		 */
		wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);

		/* Wa_1408615072:tgl */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    VSUNIT_CLKGATE_DIS_TGL);
	}

	if (IS_TIGERLAKE(i915)) {
		/* Wa_1606931601:tgl */
		wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);

		/* Wa_1409804808:tgl */
		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
			     GEN12_PUSH_CONST_DEREF_HOLD_DIS);

		/* Wa_1606700617:tgl */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (IS_GEN(i915, 11)) {
		/* This is not a Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/* WaPipelineFlushCoherentLines:icl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_masked_or(wal,
				   GEN8_GARBCNTL,
				   GEN11_HASH_CTRL_EXCL_MASK,
				   GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_masked_or(wal,
				   GEN11_GLBLINVL,
				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* WaForwardProgressSoftReset:icl */
		wa_write_or(wal,
			    GEN10_SCRATCH_LNCF2,
			    PMFLUSHDONE_LNICRSDROP |
			    PMFLUSH_GAPL3UNBLOCK |
			    PMFLUSHDONE_LNEBLK);

		/* Wa_1406609255:icl (pre-prod) */
		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
			wa_write_or(wal,
				    GEN7_SARCHKMD,
				    GEN7_DISABLE_DEMAND_PREFETCH);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_write_masked_or(wal,
				   GEN11_SCRATCH2,
				   GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				   0);

		/* WaEnable32PlaneMode:icl */
		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
			     GEN11_ENABLE_32_PLANE_MODE);

		/*
		 * Wa_1408615072:icl,ehl (vsunit)
		 * Wa_1407596294:icl,ehl (hsunit)
		 */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

		/* Wa_1407352427:icl,ehl */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    PSDUNIT_CLKGATE_DIS);
	}
	if (IS_GEN_RANGE(i915, 9, 12)) {
		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	}

	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (IS_GEN(i915, 9)) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_write_or(wal,
			    BDW_SCRATCH1,
			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_write_masked_or(wal,
					   GEN8_L3SQCREG1,
					   L3_PRIO_CREDITS_MASK,
					   L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);
	}

	if (IS_GEN(i915, 7))
		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
		wa_masked_en(wal,
			     GFX_MODE_GEN7,
			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);

	if (IS_GEN_RANGE(i915, 6, 7))
		/*
		 * We need to disable the AsyncFlip performance optimisations
		 * in order to use MI_WAIT_FOR_EVENT within the CS. It should
		 * already be programmed to '1' on all products.
		 *
		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
		 */
		wa_masked_en(wal,
			     MI_MODE,
			     ASYNC_FLIP_PERF_DISABLE);

	if (IS_GEN(i915, 6)) {
		/*
		 * Required for the hardware to program scanline values for
		 * waiting
		 * WaEnableFlushTlbInvalidationMode:snb
		 */
		wa_masked_en(wal,
			     GFX_MODE,
			     GFX_TLB_INVALIDATE_EXPLICIT);

		/*
		 * From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		wa_masked_dis(wal,
			      CACHE_MODE_0,
			      CM0_STC_EVICT_DISABLE_LRA_SNB);
	}

	if (IS_GEN_RANGE(i915, 4, 6))
		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
		wa_add(wal, MI_MODE,
		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
		       /* XXX bit doesn't stick on Broadwater */
		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
		return;

	if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	if (INTEL_GEN(engine->i915) < 4)
		return;

	wa_init_start(wal, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(engine->uncore, &engine->wa_list);
}
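
/*
 * Rough engine-workaround lifecycle, as implemented above: the list is
 * built once per engine by intel_engine_init_workarounds(), re-applied
 * via intel_engine_apply_workarounds() whenever the registers may have
 * lost their values (e.g. on engine reset), and can be cross-checked
 * from the command streamer by engine_wa_list_verify() below.
 */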

static struct i915_vma *
create_scratch(struct i915_address_space *vm, int count)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int size;
	int err;

	size = round_up(count * sizeof(u32), PAGE_SIZE);
	obj = i915_gem_object_create_internal(vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
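
/*
 * The scratch allocated above provides one u32 result slot per
 * workaround entry, rounded up to a full page; wa_list_srm() below
 * stores slot i at i915_ggtt_offset(vma) + i * sizeof(u32), leaving
 * the slots of skipped MCR-range entries unwritten (verification
 * skips them as well).
 */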

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
		return true;

	return false;
}
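
/*
 * Consequently, both the SRM emission in wa_list_srm() and the result
 * check in engine_wa_list_verify() skip entries for which mcr_range()
 * is true: a CS-initiated read of those offsets would sample whichever
 * slice/subslice instance the MCR selector happens to point at, not
 * necessarily the one the workaround targeted.
 */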

static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}
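
/*
 * A minimal sketch of what the loop above emits for a single entry on
 * gen8+, where MI_STORE_REGISTER_MEM takes a 64-bit address (the extra
 * length dword is what the srm++ accounts for):
 *
 *	srm					(opcode | global GTT | len)
 *	i915_mmio_reg_offset(wa->reg)		(source register)
 *	i915_ggtt_offset(vma) + sizeof(u32) * i	(address low)
 *	0					(address high)
 *
 * On pre-gen8 the trailing zero is just an MI_NOOP, keeping the
 * per-entry stride at a constant 4 dwords to match intel_ring_begin().
 */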

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	rq = intel_context_create_request(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err) {
		i915_request_add(rq);
		goto err_vma;
	}

	err = wa_list_srm(rq, wal, vma);
	if (err)
		goto err_vma;

	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}
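
/*
 * A hypothetical caller (e.g. a selftest or a post-reset sanity check)
 * might pair apply and verify along these lines:
 *
 *	intel_engine_apply_workarounds(engine);
 *	err = intel_engine_verify_workarounds(engine, "after reset");
 *
 * where -ENXIO indicates that at least one workaround register read
 * back with an unexpected value.
 */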

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif