Merge tag 'drm-intel-gt-next-2022-08-24' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
UAPI Changes:

- Create gt/gtN/.defaults/ for per gt sysfs defaults

  Create a gt/gtN/.defaults/ directory (similar to
  engine/<engine-name>/.defaults/) to expose default parameter values for
  each gt in sysfs. This allows userspace to restore default parameter
  values after they have changed.

Driver Changes:

- Support GuC v69 in parallel to v70 (Daniele)
- Improve TLB invalidation to limit performance regression (Chris, Mauro)
- Expose per-gt RPS defaults in sysfs (Ashutosh)
- Suppress OOM warning for shmemfs object allocation failure (Chris, Nirmoy)
- Disable PCI resize on 32-bit machines (Nirmoy)
- Update DG2 to GuC v70.4.1 (John)
- Fix CCS data copying on DG2 during swapping (Matt A)
- Add DG2 performance tuning setting recommended by spec (Matt R)
- Add GuC <-> kernel time stamp translation information to error logs (John)
- Record GuC CTB info in error logs (John)
- Route semaphores to GuC for Gen12+ when enabled (Michal Wi, John)
- Improve resiliency against bug #3575: Handle reset timeouts under unrelated kernel hangs (Chris, Ashutosh)
- Avoid system freeze by removing shared locking on freeing objects (Chris, Nirmoy)
- Demote GuC error "No response for request" to debug when expected (Zhanjun)
- Fix GuC capture size warning and bump the size (John)
- Use streaming loads to speed up dumping the GuC log (Chris, John)
- Don't abort on CTB_UNUSED status from GuC (John)
- Don't send spurious policy update for GuC child contexts (Daniele)
- Don't leak the CCS state (Matt A)
- Prefer drm_err over pr_err (John)
- Eliminate unused calc_ctrl_surf_instr_size (Matt A)
- Add dedicated function for non-ctx register tuning settings (Matt R)
- Style and typo fixes, documentation improvements (Jason Wang, Mauro)
- Selftest improvements (Matt B, Rahul, John)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YwYTCjA/Rhpd1n4A@jlahtine-mobl.ger.corp.intel.com
commit 2c2d7a67de
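For context, a minimal userspace sketch of the flow the new .defaults directory enables: read the immutable default, then write it back to the tunable to restore it. This is not part of the merge; the card0/gt0 paths, the attribute names from the RPS patches below, and root privileges for the write are assumptions:

#include <stdio.h>

int main(void)
{
        char buf[32];
        /* hypothetical paths; adjust the card/gt indices for your system */
        FILE *def = fopen("/sys/class/drm/card0/gt/gt0/.defaults/rps_max_freq_mhz", "r");
        FILE *cur = fopen("/sys/class/drm/card0/gt/gt0/rps_max_freq_mhz", "w");

        if (!def || !cur)
                return 1;

        if (fgets(buf, sizeof(buf), def))       /* e.g. "2050\n" */
                fputs(buf, cur);                /* restore the default */

        fclose(def);
        fclose(cur);
        return 0;
}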
@@ -75,7 +75,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 	if (size > resource_size(&mr->region))
 		return -ENOMEM;
 
-	if (sg_alloc_table(st, page_count, GFP_KERNEL))
+	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
 		return -ENOMEM;
 
 	/*
@@ -137,7 +137,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 			 * trigger the out-of-memory killer and for
 			 * this we want __GFP_RETRY_MAYFAIL.
 			 */
-			gfp |= __GFP_RETRY_MAYFAIL;
+			gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
 		}
 	} while (1);
 
@@ -209,7 +209,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
 rebuild_st:
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
 	if (!st)
 		return -ENOMEM;
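As an aside, a sketch of the allocation idiom these hunks converge on (illustrative only, not from the patch): __GFP_RETRY_MAYFAIL asks the allocator to try hard but return NULL instead of invoking the OOM killer, and __GFP_NOWARN suppresses the allocation-failure backtrace, since the driver recovers from NULL gracefully:

gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
void *buf = kmalloc(size, gfp);         /* may fail quietly */

if (!buf)
        return -ENOMEM;                 /* caller recovers; no OOM kill, no splat */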
@@ -55,6 +55,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
 	for (tmp = 1, intel_gt_pm_get(gt); tmp; \
 	     intel_gt_pm_put(gt), tmp = 0)
 
+/**
+ * with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
+ * it from going to sleep, run some code and then asynchronously put the
+ * reference away.
+ *
+ * @gt: pointer to the gt
+ * @wf: pointer to a temporary wakeref.
+ */
 #define with_intel_gt_pm_if_awake(gt, wf) \
 	for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
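A hypothetical usage sketch of the macro documented above (the TLB-invalidation work in this series is the intended kind of caller):

intel_wakeref_t wf;

with_intel_gt_pm_if_awake(gt, wf) {
        /*
         * Only reached if the GT was already awake; the wakeref keeps it
         * awake for the body and is released asynchronously afterwards.
         */
}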
@@ -259,6 +259,9 @@
 #define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL	GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
 #define GEN9_PREEMPT_GPGPU_LEVEL_MASK		GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
 
+#define DRAW_WATERMARK				_MMIO(0x26c0)
+#define   VERT_WM_VAL				REG_GENMASK(9, 0)
+
 #define GEN12_GLOBAL_MOCS(i)			_MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
 
 #define RENDER_HWS_PGA_GEN7			_MMIO(0x4080)
@@ -374,6 +377,9 @@
 #define CHICKEN_RASTER_1			_MMIO(0x6204)
 #define   DIS_SF_ROUND_NEAREST_EVEN		REG_BIT(8)
 
+#define CHICKEN_RASTER_2			_MMIO(0x6208)
+#define   TBIMR_FAST_CLIP			REG_BIT(5)
+
 #define VFLSKPD					_MMIO(0x62a8)
 #define   DIS_OVER_FETCH_CACHE			REG_BIT(1)
 #define   DIS_MULT_MISS_RD_SQUASH		REG_BIT(0)
@@ -1007,6 +1013,8 @@
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC	(1 << 9)
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC	(1 << 7)
 
+#define GUCPMTIMESTAMP				_MMIO(0xc3e8)
+
 #define __GEN9_RCS0_MOCS0			0xc800
 #define GEN9_GFX_MOCS(i)			_MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
 #define __GEN9_VCS0_MOCS0			0xc900
@@ -1078,6 +1086,7 @@
 
 #define GEN10_SAMPLER_MODE			_MMIO(0xe18c)
 #define   ENABLE_SMALLPL			REG_BIT(15)
+#define   SC_DISABLE_POWER_OPTIMIZATION_EBB	REG_BIT(9)
 #define   GEN11_SAMPLER_ENABLE_HEADLESS_MSG	REG_BIT(5)
 
 #define GEN9_HALF_SLICE_CHICKEN7		_MMIO(0xe194)
@@ -1123,6 +1132,8 @@
 
 #define RT_CTRL					_MMIO(0xe530)
 #define   DIS_NULL_QUERY			REG_BIT(10)
+#define   STACKID_CTRL				REG_GENMASK(6, 5)
+#define   STACKID_CTRL_512			REG_FIELD_PREP(STACKID_CTRL, 0x2)
 
 #define EU_PERF_CNTL1				_MMIO(0xe558)
 #define EU_PERF_CNTL5				_MMIO(0xe55c)
@@ -22,11 +22,6 @@ bool is_object_gt(struct kobject *kobj)
 	return !strncmp(kobj->name, "gt", 2);
 }
 
-static struct intel_gt *kobj_to_gt(struct kobject *kobj)
-{
-	return container_of(kobj, struct intel_gt, sysfs_gt);
-}
-
 struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
 					    const char *name)
 {
@@ -101,6 +96,10 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
 			   gt->i915->sysfs_gt, "gt%d", gt->info.id))
 		goto exit_fail;
 
+	gt->sysfs_defaults = kobject_create_and_add(".defaults", &gt->sysfs_gt);
+	if (!gt->sysfs_defaults)
+		goto exit_fail;
+
 	intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
 
 	return;
@@ -113,5 +112,6 @@ exit_fail:
 
 void intel_gt_sysfs_unregister(struct intel_gt *gt)
 {
+	kobject_put(gt->sysfs_defaults);
 	kobject_put(&gt->sysfs_gt);
 }
@@ -10,6 +10,7 @@
 #include <linux/kobject.h>
 
+#include "i915_gem.h" /* GEM_BUG_ON() */
 #include "intel_gt_types.h"
 
 struct intel_gt;
 
@@ -22,6 +23,11 @@ intel_gt_create_kobj(struct intel_gt *gt,
 		     struct kobject *dir,
 		     const char *name);
 
+static inline struct intel_gt *kobj_to_gt(struct kobject *kobj)
+{
+	return container_of(kobj, struct intel_gt, sysfs_gt);
+}
+
 void intel_gt_sysfs_register(struct intel_gt *gt);
 void intel_gt_sysfs_unregister(struct intel_gt *gt);
 struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
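Moving kobj_to_gt() into the header works because sysfs_gt is embedded (not a pointer) in struct intel_gt, so container_of() can recover the containing structure from the member's address. A generic sketch of the same pattern, with hypothetical names:

struct wrapper {
        int data;
        struct kobject kobj;            /* embedded member */
};

static inline struct wrapper *to_wrapper(struct kobject *kobj)
{
        /* subtracts offsetof(struct wrapper, kobj) from the pointer */
        return container_of(kobj, struct wrapper, kobj);
}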
@@ -727,6 +727,34 @@ static const struct attribute *media_perf_power_attrs[] = {
 	NULL
 };
 
+static ssize_t
+default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+	return sysfs_emit(buf, "%u\n", gt->defaults.min_freq);
+}
+
+static struct kobj_attribute default_min_freq_mhz =
+__ATTR(rps_min_freq_mhz, 0444, default_min_freq_mhz_show, NULL);
+
+static ssize_t
+default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+	return sysfs_emit(buf, "%u\n", gt->defaults.max_freq);
+}
+
+static struct kobj_attribute default_max_freq_mhz =
+__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
+
+static const struct attribute * const rps_defaults_attrs[] = {
+	&default_min_freq_mhz.attr,
+	&default_max_freq_mhz.attr,
+	NULL
+};
+
 static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
 				const struct attribute * const *attrs)
 {
@@ -776,4 +804,10 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
 			 "failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
 			 gt->info.id, ERR_PTR(ret));
 	}
+
+	ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
+	if (ret)
+		drm_warn(&gt->i915->drm,
+			 "failed to add gt%u rps defaults (%pe)\n",
+			 gt->info.id, ERR_PTR(ret));
 }
@@ -76,6 +76,11 @@ enum intel_submission_method {
 	INTEL_SUBMISSION_GUC,
 };
 
+struct gt_defaults {
+	u32 min_freq;
+	u32 max_freq;
+};
+
 struct intel_gt {
 	struct drm_i915_private *i915;
 	struct intel_uncore *uncore;
@@ -251,6 +256,10 @@ struct intel_gt {
 
 	/* gt/gtN sysfs */
 	struct kobject sysfs_gt;
+
+	/* sysfs defaults per gt */
+	struct gt_defaults defaults;
+	struct kobject *sysfs_defaults;
 };
 
 enum intel_gt_scratch_field {
@@ -511,44 +511,16 @@ static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
 	return cmd;
 }
 
-static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
-{
-	u32 num_cmds, num_blks, total_size;
-
-	if (!GET_CCS_BYTES(i915, size))
-		return 0;
-
-	/*
-	 * XY_CTRL_SURF_COPY_BLT transfers CCS in 256 byte
-	 * blocks. one XY_CTRL_SURF_COPY_BLT command can
-	 * transfer upto 1024 blocks.
-	 */
-	num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
-				NUM_CCS_BYTES_PER_BLOCK);
-	num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
-	total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;
-
-	/*
-	 * Adding a flush before and after XY_CTRL_SURF_COPY_BLT
-	 */
-	total_size += 2 * MI_FLUSH_DW_SIZE;
-
-	return total_size;
-}
-
 static int emit_copy_ccs(struct i915_request *rq,
 			 u32 dst_offset, u8 dst_access,
 			 u32 src_offset, u8 src_access, int size)
 {
 	struct drm_i915_private *i915 = rq->engine->i915;
 	int mocs = rq->engine->gt->mocs.uc_index << 1;
-	u32 num_ccs_blks, ccs_ring_size;
+	u32 num_ccs_blks;
 	u32 *cs;
 
-	ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
-	WARN_ON(!ccs_ring_size);
-
-	cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
+	cs = intel_ring_begin(rq, 12);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -583,8 +555,7 @@ static int emit_copy_ccs(struct i915_request *rq,
 		 FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
 
 	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
-	if (ccs_ring_size & 1)
-		*cs++ = MI_NOOP;
+	*cs++ = MI_NOOP;
 
 	intel_ring_advance(rq, cs);
 
@@ -638,40 +609,38 @@ static int emit_copy(struct i915_request *rq,
 	return 0;
 }
 
-static int scatter_list_length(struct scatterlist *sg)
+static u64 scatter_list_length(struct scatterlist *sg)
 {
-	int len = 0;
+	u64 len = 0;
 
 	while (sg && sg_dma_len(sg)) {
 		len += sg_dma_len(sg);
 		sg = sg_next(sg);
-	};
+	}
 
 	return len;
 }
 
-static void
+static int
 calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
-		   int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+		   u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
 {
-	if (ccs_bytes_to_cpy) {
-		if (!src_is_lmem)
-			/*
-			 * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
-			 * will be taken for the blt. in Flat-ccs supported
-			 * platform Smem obj will have more pages than required
-			 * for main meory hence limit it to the required size
-			 * for main memory
-			 */
-			*src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
-	} else { /* ccs handling is not required */
-		*src_sz = CHUNK_SZ;
-	}
+	if (ccs_bytes_to_cpy && !src_is_lmem)
+		/*
+		 * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
+		 * will be taken for the blt. On Flat-CCS capable platforms
+		 * an smem object will have more pages than required for
+		 * main memory, hence limit it to the required size for
+		 * main memory.
		 */
+		return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+	else
+		return CHUNK_SZ;
 }
 
-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
 {
-	u32 len;
+	u64 len;
 
 	do {
 		GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
@@ -702,12 +671,12 @@ intel_context_migrate_copy(struct intel_context *ce,
 {
 	struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
 	struct drm_i915_private *i915 = ce->engine->i915;
-	u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+	u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
 	enum i915_cache_level ccs_cache_level;
 	u32 src_offset, dst_offset;
 	u8 src_access, dst_access;
 	struct i915_request *rq;
-	int src_sz, dst_sz;
+	u64 src_sz, dst_sz;
 	bool ccs_is_src, overwrite_ccs;
 	int err;
 
@@ -790,8 +759,8 @@ intel_context_migrate_copy(struct intel_context *ce,
 		if (err)
 			goto out_rq;
 
-		calculate_chunk_sz(i915, src_is_lmem, &src_sz,
-				   bytes_to_cpy, ccs_bytes_to_cpy);
+		src_sz = calculate_chunk_sz(i915, src_is_lmem,
					    bytes_to_cpy, ccs_bytes_to_cpy);
 
 		len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
 			       src_offset, src_sz);
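The int to u64 conversions above matter once an object's backing store exceeds what 32 bits can count. Illustrative arithmetic with assumed sizes:

u32 len32 = (u32)(3ULL << 30) + (u32)(2ULL << 30);      /* wraps to 1 GiB */
u64 len64 = (3ULL << 30) + (2ULL << 30);                /* 5 GiB, correct */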
@@ -1281,9 +1281,6 @@ static void intel_gt_reset_global(struct intel_gt *gt,
 	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
 		intel_display_prepare_reset(gt->i915);
 
-		/* Flush everyone using a resource about to be clobbered */
-		synchronize_srcu_expedited(&gt->reset.backoff_srcu);
-
 		intel_gt_reset(gt, engine_mask, reason);
 
 		intel_display_finish_reset(gt->i915);
@@ -1392,6 +1389,9 @@ void intel_gt_handle_error(struct intel_gt *gt,
 		}
 	}
 
+	/* Flush everyone using a resource about to be clobbered */
+	synchronize_srcu_expedited(&gt->reset.backoff_srcu);
+
 	intel_gt_reset_global(gt, engine_mask, msg);
 
 	if (!intel_uc_uses_guc_submission(&gt->uc)) {
@@ -1979,7 +1979,9 @@ void intel_rps_init(struct intel_rps *rps)
 
 	/* Derive initial user preferences/limits from the hardware limits */
 	rps->max_freq_softlimit = rps->max_freq;
+	rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
 	rps->min_freq_softlimit = rps->min_freq;
+	rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;
 
 	/* After setting max-softlimit, find the overclock max freq */
 	if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
@@ -568,6 +568,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
 				   struct i915_wa_list *wal)
 {
+	wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
 	wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
 			 REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
 	wa_add(wal,
@@ -2102,13 +2103,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		/* Wa_1509235366:dg2 */
 		wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
 			    GLOBAL_INVALIDATION_MODE);
-
-		/*
-		 * The following are not actually "workarounds" but rather
-		 * recommended tuning settings documented in the bspec's
-		 * performance guide section.
-		 */
-		wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
 	}
 
 	if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
@@ -2119,6 +2113,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
 	}
 
+	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+	    IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+		/* Wa_1509727124:dg2 */
+		wa_masked_en(wal, GEN10_SAMPLER_MODE,
+			     SC_DISABLE_POWER_OPTIMIZATION_EBB);
+	}
+
 	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
 	    IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
 		/* Wa_14012419201:dg2 */
@@ -2195,15 +2196,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
 	}
 
-	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) ||
-	    IS_DG2_G11(i915)) {
-		/* Wa_22012654132:dg2 */
-		wa_add(wal, GEN10_CACHE_MODE_SS, 0,
-		       _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
-		       0 /* write-only, so skip validation */,
-		       true);
-	}
-
 	/* Wa_14013202645:dg2 */
 	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
 	    IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2669,6 +2661,49 @@ ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 	}
 }
 
+/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them with the same
+ * workaround infrastructure to ensure that they're automatically added to
+ * the GuC save/restore lists, re-applied at the right times, and checked for
+ * any conflicting programming requested by real workarounds.
+ *
+ * Programming settings should be added here only if their registers are not
+ * part of an engine's register state context. If a register is part of a
+ * context, then any tuning settings should be programmed in an appropriate
+ * function invoked by __intel_engine_init_ctx_wa().
+ */
+static void
+add_render_compute_tuning_settings(struct drm_i915_private *i915,
+				   struct i915_wa_list *wal)
+{
+	if (IS_PONTEVECCHIO(i915)) {
+		wa_write(wal, XEHPC_L3SCRUB,
+			 SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+	}
+
+	if (IS_DG2(i915)) {
+		wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+		wa_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
+		wa_write_clr_set(wal, DRAW_WATERMARK, VERT_WM_VAL,
+				 REG_FIELD_PREP(VERT_WM_VAL, 0x3FF));
+
+		/*
+		 * This is also listed as Wa_22012654132 for certain DG2
+		 * steppings, but the tuning setting programming is a superset
+		 * since it applies to all DG2 variants and steppings.
+		 *
+		 * Note that register 0xE420 is write-only and cannot be read
+		 * back for verification on DG2 (due to Wa_14012342262), so
+		 * we need to explicitly skip the readback.
+		 */
+		wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+		       _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+		       0 /* write-only, so skip validation */,
+		       true);
+	}
+}
+
 /*
  * The workarounds in this function apply to shared registers in
  * the general render reset domain that aren't tied to a
@@ -2683,14 +2718,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
 
-	if (IS_PONTEVECCHIO(i915)) {
-		/*
-		 * The following is not actually a "workaround" but rather
-		 * a recommended tuning setting documented in the bspec's
-		 * performance guide section.
-		 */
-		wa_write(wal, XEHPC_L3SCRUB, SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+	add_render_compute_tuning_settings(i915, wal);
 
+	if (IS_PONTEVECCHIO(i915)) {
 		/* Wa_16016694945 */
 		wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
 	}
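For reference, the read-modify-write that wa_write_clr_set() records for the DRAW_WATERMARK tuning above, worked through with an assumed starting value:

u32 old = 0x00000123;                   /* assumed prior register value */
u32 clr = VERT_WM_VAL;                  /* REG_GENMASK(9, 0) */
u32 set = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);
u32 val = (old & ~clr) | set;           /* low 10 bits forced to 0x3FF */

Recording it through the workaround list, rather than as a one-off write, is what gets it re-applied after resets and added to the GuC save/restore list.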
@@ -2077,7 +2077,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	intel_context_set_banned(rq->context);
+	intel_context_ban(rq->context, rq);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -2136,7 +2136,7 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	intel_context_set_banned(rq[1]->context);
+	intel_context_ban(rq[1]->context, rq[1]);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -2219,7 +2219,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	intel_context_set_banned(rq[2]->context);
+	intel_context_ban(rq[2]->context, rq[2]);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -2234,7 +2234,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	if (rq[1]->fence.error != 0) {
+	/*
+	 * The behavior between having semaphores and not is different. With
+	 * semaphores the subsequent request is on the hardware and not cancelled,
+	 * while without them the request is held in the driver and cancelled.
+	 */
+	if (intel_engine_has_semaphores(rq[1]->engine) &&
+	    rq[1]->fence.error != 0) {
 		pr_err("Normal inflight1 request did not complete\n");
 		err = -EINVAL;
 		goto out;
@@ -2282,7 +2288,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	intel_context_set_banned(rq->context);
+	intel_context_ban(rq->context, rq);
 	err = intel_engine_pulse(arg->engine); /* force reset */
 	if (err)
 		goto out;
@@ -1302,13 +1302,15 @@ static int igt_reset_wait(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
-	struct intel_engine_cs *engine = gt->engine[RCS0];
+	struct intel_engine_cs *engine;
 	struct i915_request *rq;
 	unsigned int reset_count;
 	struct hang h;
 	long timeout;
 	int err;
 
+	engine = intel_selftest_find_any_engine(gt);
+
 	if (!engine || !intel_engine_can_store_dword(engine))
 		return 0;
 
@@ -1432,7 +1434,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 				 int (*fn)(void *),
 				 unsigned int flags)
 {
-	struct intel_engine_cs *engine = gt->engine[RCS0];
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_object *obj;
 	struct task_struct *tsk = NULL;
 	struct i915_request *rq;
@@ -1444,6 +1446,8 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 	if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
 		return 0;
 
+	engine = intel_selftest_find_any_engine(gt);
+
 	if (!engine || !intel_engine_can_store_dword(engine))
 		return 0;
 
@@ -1819,12 +1823,14 @@ static int igt_handle_error(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
-	struct intel_engine_cs *engine = gt->engine[RCS0];
+	struct intel_engine_cs *engine;
 	struct hang h;
 	struct i915_request *rq;
 	struct i915_gpu_coredump *error;
 	int err;
 
+	engine = intel_selftest_find_any_engine(gt);
+
 	/* Check that we can issue a global GPU and engine reset */
 
 	if (!intel_has_reset_engine(gt))
@@ -37,6 +37,7 @@
 *  |   |       |   - _`GUC_CTB_STATUS_OVERFLOW` = 1 (head/tail too large)    |
 *  |   |       |   - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message)     |
 *  |   |       |   - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified)     |
+*  |   |       |   - _`GUC_CTB_STATUS_UNUSED` = 8 (CTB is not in use)        |
 *  +---+-------+--------------------------------------------------------------+
 *  |...|       | RESERVED = MBZ                                               |
 *  +---+-------+--------------------------------------------------------------+
@@ -49,9 +50,10 @@ struct guc_ct_buffer_desc {
 	u32 tail;
 	u32 status;
 #define GUC_CTB_STATUS_NO_ERROR				0
-#define GUC_CTB_STATUS_OVERFLOW				(1 << 0)
-#define GUC_CTB_STATUS_UNDERFLOW			(1 << 1)
-#define GUC_CTB_STATUS_MISMATCH				(1 << 2)
+#define GUC_CTB_STATUS_OVERFLOW				BIT(0)
+#define GUC_CTB_STATUS_UNDERFLOW			BIT(1)
+#define GUC_CTB_STATUS_MISMATCH				BIT(2)
+#define GUC_CTB_STATUS_UNUSED				BIT(3)
 	u32 reserved[13];
 } __packed;
 static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
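The status word is a bitmask, so several conditions can be set at once. A decoding sketch using the bits above (the helper name is hypothetical):

static void ct_report_status(u32 status)
{
        if (status & GUC_CTB_STATUS_OVERFLOW)
                pr_err("CTB overflow: head/tail too large\n");
        if (status & GUC_CTB_STATUS_UNDERFLOW)
                pr_err("CTB underflow: truncated message\n");
        if (status & GUC_CTB_STATUS_MISMATCH)
                pr_err("CTB mismatch: head/tail modified\n");
        if (status & GUC_CTB_STATUS_UNUSED)
                pr_info("CTB is not in use\n");
}

This bitmask structure is why ct_read() further below can mask off GUC_CTB_STATUS_UNUSED before deciding whether the buffer is actually corrupted.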
@@ -389,6 +389,25 @@ void intel_guc_write_params(struct intel_guc *guc)
 	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
 }
 
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	intel_wakeref_t wakeref;
+	u32 stamp = 0;
+	u64 ktime;
+
+	intel_device_info_print_runtime(RUNTIME_INFO(gt->i915), p);
+
+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+		stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
+	ktime = ktime_get_boottime_ns();
+
+	drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
+	drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
+	drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
+		   gt->clock_frequency, gt->clock_period_ns);
+}
+
 int intel_guc_init(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
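A sketch of the translation these two reference timestamps enable when reading GuC log times; the exact formula is an assumption here, not spelled out by the patch:

/* assumed conversion: anchor GuC ticks to the kernel boottime reference */
static u64 guc_stamp_to_ktime_ns(u64 ktime_ref_ns, u32 guc_ref,
                                 u32 guc_stamp, u32 cs_freq_hz)
{
        u32 delta_ticks = guc_stamp - guc_ref;  /* u32 wraparound-safe */

        return ktime_ref_ns + div_u64((u64)delta_ticks * NSEC_PER_SEC, cs_freq_hz);
}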
@@ -464,4 +464,6 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
 
 void intel_guc_write_barrier(struct intel_guc *guc);
 
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
+
 #endif
@@ -464,7 +464,11 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
 }
 
 #define LR_HW_CONTEXT_SIZE	(80 * sizeof(u32))
-#define LRC_SKIP_SIZE		(LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE)
+#define XEHP_LR_HW_CONTEXT_SIZE	(96 * sizeof(u32))
+#define LR_HW_CONTEXT_SZ(i915)	(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+				 XEHP_LR_HW_CONTEXT_SIZE : \
+				 LR_HW_CONTEXT_SIZE)
+#define LRC_SKIP_SIZE(i915)	(LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
 static int guc_prep_golden_context(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
@@ -525,7 +529,7 @@ static int guc_prep_golden_context(struct intel_guc *guc)
 		 * on all engines).
 		 */
 		ads_blob_write(guc, ads.eng_state_size[guc_class],
-			       real_size - LRC_SKIP_SIZE);
+			       real_size - LRC_SKIP_SIZE(gt->i915));
 		ads_blob_write(guc, ads.golden_context_lrca[guc_class],
 			       addr_ggtt);
 
@@ -599,7 +603,7 @@ static void guc_init_golden_context(struct intel_guc *guc)
 	}
 
 	GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
-		   real_size - LRC_SKIP_SIZE);
+		   real_size - LRC_SKIP_SIZE(gt->i915));
 	GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);
 
 	addr_ggtt += alloc_size;
@@ -600,10 +600,8 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
 	return 0;
 }
 
-#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
-
-int
-intel_guc_capture_output_min_size_est(struct intel_guc *guc)
+static int
+guc_capture_output_min_size_est(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 	struct intel_engine_cs *engine;
@@ -623,13 +621,8 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
 	 * For each engine instance, there would be 1 x guc_state_capture_group_t output
 	 * followed by 3 x guc_state_capture_t lists. The latter is how the register
 	 * dumps are split across different register types (where the '3' are global vs class
-	 * vs instance). Finally, let's multiply the whole thing by 3x (just so we are
-	 * not limited to just 1 round of data in a worst case full register dump log)
-	 *
-	 * NOTE: intel_guc_log that allocates the log buffer would round this size up to
-	 * a power of two.
+	 * vs instance).
 	 */
-
 	for_each_engine(engine, gt, id) {
 		worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
 				  (3 * sizeof(struct guc_state_capture_header_t));
@@ -649,7 +642,30 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
 
 	worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
 
-	return (worst_min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER);
+	return worst_min_size;
+}
+
+/*
+ * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
+ * before the i915 can read the data out and process it
+ */
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+static void check_guc_capture_size(struct intel_guc *guc)
+{
+	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+	int min_size = guc_capture_output_min_size_est(guc);
+	int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+
+	if (min_size < 0)
+		drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+			 min_size);
+	else if (min_size > CAPTURE_BUFFER_SIZE)
+		drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n",
+			 CAPTURE_BUFFER_SIZE, min_size);
+	else if (spare_size > CAPTURE_BUFFER_SIZE)
+		drm_notice(&i915->drm, "GuC error state capture buffer may be too small: %d < %d (min = %d)\n",
+			   CAPTURE_BUFFER_SIZE, spare_size, min_size);
 }
 
 /*
@@ -1580,5 +1596,7 @@ int intel_guc_capture_init(struct intel_guc *guc)
 	INIT_LIST_HEAD(&guc->capture->outlist);
 	INIT_LIST_HEAD(&guc->capture->cachelist);
 
+	check_guc_capture_size(guc);
+
 	return 0;
 }
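Working the three branches above through with assumed numbers and the non-debug CAPTURE_BUFFER_SIZE of SZ_2M from intel_guc_log.h: if guc_capture_output_min_size_est() returns, say, 900 KiB, then spare_size = 3 * 900 = 2700 KiB. min_size (900 KiB) fits within 2048 KiB, so neither drm_warn() fires, but spare_size exceeds the buffer, so only the softer drm_notice() about back-to-back captures is emitted.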
@@ -21,7 +21,6 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
 void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
 					 struct intel_context *ce);
 void intel_guc_capture_process(struct intel_guc *guc);
-int intel_guc_capture_output_min_size_est(struct intel_guc *guc);
 int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
 			      void **outptr);
 int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
@@ -455,6 +455,7 @@ corrupted:
 
 /**
 * wait_for_ct_request_update - Wait for CT request state update.
+ * @ct: pointer to CT
 * @req: pointer to pending request
 * @status: placeholder for status
 *
@@ -467,9 +468,10 @@ corrupted:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
-static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
+static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
 {
 	int err;
+	bool ct_enabled;
 
 	/*
	 * Fast commands should complete in less than 10us, so sample quickly
@@ -481,12 +483,15 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
 #define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
 #define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
 #define done \
-	(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
+	(!(ct_enabled = intel_guc_ct_enabled(ct)) || \
+	 FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
 	 GUC_HXG_ORIGIN_GUC)
 	err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
 	if (err)
 		err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
 #undef done
+	if (!ct_enabled)
+		err = -ENODEV;
 
 	*status = req->status;
 	return err;
@@ -703,11 +708,18 @@ retry:
 
 	intel_guc_notify(ct_to_guc(ct));
 
-	err = wait_for_ct_request_update(&request, status);
+	err = wait_for_ct_request_update(ct, &request, status);
 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
 	if (unlikely(err)) {
-		CT_ERROR(ct, "No response for request %#x (fence %u)\n",
-			 action[0], request.fence);
+		if (err == -ENODEV)
+			/* wait_for_ct_request_update returns -ENODEV on reset/suspend in progress.
			 * In this case, output is debug rather than error info.
			 */
+			CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
+				 action[0], request.fence);
+		else
+			CT_ERROR(ct, "No response for request %#x (fence %u)\n",
+				 action[0], request.fence);
 		goto unlink;
 	}
 
@@ -771,8 +783,9 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
 
 	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
 	if (unlikely(ret < 0)) {
-		CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
-			 action[0], ERR_PTR(ret), status);
+		if (ret != -ENODEV)
+			CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
+				 action[0], ERR_PTR(ret), status);
 	} else if (unlikely(ret)) {
 		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
 			 action[0], ret, ret);
@@ -816,8 +829,22 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
 	if (unlikely(ctb->broken))
 		return -EPIPE;
 
-	if (unlikely(desc->status))
-		goto corrupted;
+	if (unlikely(desc->status)) {
+		u32 status = desc->status;
+
+		if (status & GUC_CTB_STATUS_UNUSED) {
+			/*
+			 * Potentially valid if a CLIENT_RESET request resulted in
			 * contexts/engines being reset. But should never happen as
			 * no contexts should be active when CLIENT_RESET is sent.
			 */
+			CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
+			status &= ~GUC_CTB_STATUS_UNUSED;
+		}
+
+		if (status)
+			goto corrupted;
+	}
 
 	GEM_BUG_ON(head > size);
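The wait above folds a liveness check into the poll condition, so a disabled channel breaks out of the poll immediately instead of burning the full one-second timeout. A generic sketch of the pattern, with hypothetical names:

bool alive;

#define done (!(alive = channel_is_enabled(ch)) || request_completed(req))
        err = wait_for_us(done, 10);            /* fast path: under 10us */
        if (err)
                err = wait_for(done, 1000);     /* slow path: up to 1s */
#undef done

        if (!alive)
                err = -ENODEV;                  /* cancelled, not timed out */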
@@ -15,6 +15,32 @@
 
 static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
 
+static u32 intel_guc_log_size(struct intel_guc_log *log)
+{
+	/*
	 * GuC Log buffer Layout:
	 *
	 * NB: Ordering must follow "enum guc_log_buffer_type".
	 *
	 *  +===============================+ 00B
	 *  |      Debug state header       |
	 *  +-------------------------------+ 32B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 64B
	 *  |     Capture state header      |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |          Debug logs           |
	 *  +===============================+ + DEBUG_SIZE
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |         Capture logs          |
	 *  +===============================+ + CAPTURE_SIZE
	 */
+	return PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE + CAPTURE_BUFFER_SIZE;
+}
+
 /**
 * DOC: GuC firmware log
 *
@@ -461,32 +487,7 @@ int intel_guc_log_create(struct intel_guc_log *log)
 
 	GEM_BUG_ON(log->vma);
 
-	/*
	 * GuC Log buffer Layout
	 * (this ordering must follow "enum guc_log_buffer_type" definition)
	 *
	 *  +===============================+ 00B
	 *  |      Debug state header       |
	 *  +-------------------------------+ 32B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 64B
	 *  |     Capture state header      |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |          Debug logs           |
	 *  +===============================+ + DEBUG_SIZE
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |         Capture logs          |
	 *  +===============================+ + CAPTURE_SIZE
	 */
-	if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE)
-		DRM_WARN("GuC log buffer for state_capture maybe too small. %d < %d\n",
-			 CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc));
-
-	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
-		       CAPTURE_BUFFER_SIZE;
+	guc_log_size = intel_guc_log_size(log);
 
 	vma = intel_guc_allocate_vma(guc, guc_log_size);
 	if (IS_ERR(vma)) {
@@ -749,8 +750,9 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
 	struct intel_guc *guc = log_to_guc(log);
 	struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
 	struct drm_i915_gem_object *obj = NULL;
-	u32 *map;
-	int i = 0;
+	void *map;
+	u32 *page;
+	int i, j;
 
 	if (!intel_guc_is_supported(guc))
 		return -ENODEV;
@@ -763,21 +765,34 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
 	if (!obj)
 		return 0;
 
+	page = (u32 *)__get_free_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	intel_guc_dump_time_info(guc, p);
+
 	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(map)) {
 		DRM_DEBUG("Failed to pin object\n");
 		drm_puts(p, "(log data unaccessible)\n");
+		free_page((unsigned long)page);
 		return PTR_ERR(map);
 	}
 
-	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
-		drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
-			   *(map + i), *(map + i + 1),
-			   *(map + i + 2), *(map + i + 3));
+	for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
+		if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
+			memcpy(page, map + i, PAGE_SIZE);
+
+		for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
+			drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   *(page + j + 0), *(page + j + 1),
+				   *(page + j + 2), *(page + j + 3));
+	}
 
 	drm_puts(p, "\n");
 
 	i915_gem_object_unpin_map(obj);
+	free_page((unsigned long)page);
 
 	return 0;
 }
@@ -22,11 +22,11 @@ struct intel_guc;
 #elif defined(CONFIG_DRM_I915_DEBUG_GEM)
 #define CRASH_BUFFER_SIZE	SZ_1M
 #define DEBUG_BUFFER_SIZE	SZ_2M
-#define CAPTURE_BUFFER_SIZE	SZ_1M
+#define CAPTURE_BUFFER_SIZE	SZ_4M
 #else
 #define CRASH_BUFFER_SIZE	SZ_8K
 #define DEBUG_BUFFER_SIZE	SZ_64K
-#define CAPTURE_BUFFER_SIZE	SZ_16K
+#define CAPTURE_BUFFER_SIZE	SZ_2M
 #endif
 
 /*
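Worked totals for the allocation intel_guc_log.c derives from these constants (the config branch above this hunk is not shown here): after this change, non-debug builds allocate PAGE_SIZE (4 KiB) + 8 KiB + 64 KiB + 2 MiB = 2124 KiB, and CONFIG_DRM_I915_DEBUG_GEM builds allocate 4 KiB + 1 MiB + 2 MiB + 4 MiB = 7172 KiB, in a single GuC vma.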
|
@ -102,6 +102,10 @@
|
||||
#define GUC_SEND_TRIGGER (1<<0)
|
||||
#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
|
||||
|
||||
#define GEN12_GUC_SEM_INTR_ENABLES _MMIO(0xc71c)
|
||||
#define GUC_SEM_INTR_ROUTE_TO_GUC BIT(31)
|
||||
#define GUC_SEM_INTR_ENABLE_ALL (0xff)
|
||||
|
||||
#define GUC_NUM_DOORBELLS 256
|
||||
|
||||
/* format of the HW-monitored doorbell cacheline */
|
||||
|
@ -575,20 +575,24 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
|
||||
* unless they have deviated from defaults, in which case,
|
||||
* we retain the values and set min/max accordingly.
|
||||
*/
|
||||
if (!slpc->max_freq_softlimit)
|
||||
if (!slpc->max_freq_softlimit) {
|
||||
slpc->max_freq_softlimit = slpc->rp0_freq;
|
||||
else if (slpc->max_freq_softlimit != slpc->rp0_freq)
|
||||
slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
|
||||
} else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
|
||||
ret = intel_guc_slpc_set_max_freq(slpc,
|
||||
slpc->max_freq_softlimit);
|
||||
}
|
||||
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
if (!slpc->min_freq_softlimit)
|
||||
if (!slpc->min_freq_softlimit) {
|
||||
slpc->min_freq_softlimit = slpc->min_freq;
|
||||
else if (slpc->min_freq_softlimit != slpc->min_freq)
|
||||
slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
|
||||
} else if (slpc->min_freq_softlimit != slpc->min_freq) {
|
||||
return intel_guc_slpc_set_min_freq(slpc,
|
||||
slpc->min_freq_softlimit);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -2420,7 +2420,6 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
 	struct context_policy policy;
 	u32 execution_quantum;
 	u32 preemption_timeout;
-	bool missing = false;
 	unsigned long flags;
 	int ret;
 
@@ -2438,32 +2437,9 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
 		__guc_context_policy_add_preempt_to_idle(&policy, 1);
 
 	ret = __guc_context_set_context_policies(guc, &policy, loop);
-	missing = ret != 0;
-
-	if (!missing && intel_context_is_parent(ce)) {
-		struct intel_context *child;
-
-		for_each_child(ce, child) {
-			__guc_context_policy_start_klv(&policy, child->guc_id.id);
-
-			if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
-				__guc_context_policy_add_preempt_to_idle(&policy, 1);
-
-			child->guc_state.prio = ce->guc_state.prio;
-			__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
-			__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
-			__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
-
-			ret = __guc_context_set_context_policies(guc, &policy, loop);
-			if (ret) {
-				missing = true;
-				break;
-			}
-		}
-	}
 
 	spin_lock_irqsave(&ce->guc_state.lock, flags);
-	if (missing)
+	if (ret != 0)
 		set_context_policy_required(ce);
 	else
 		clr_context_policy_required(ce);
@@ -4191,13 +4167,27 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 
 void intel_guc_submission_enable(struct intel_guc *guc)
 {
+	struct intel_gt *gt = guc_to_gt(guc);
+
+	/* Enable and route to GuC */
+	if (GRAPHICS_VER(gt->i915) >= 12)
+		intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
+				   GUC_SEM_INTR_ROUTE_TO_GUC |
+				   GUC_SEM_INTR_ENABLE_ALL);
+
 	guc_init_lrc_mapping(guc);
 	guc_init_engine_stats(guc);
 }
 
 void intel_guc_submission_disable(struct intel_guc *guc)
 {
+	struct intel_gt *gt = guc_to_gt(guc);
+
 	/* Note: By the time we're here, GuC may have already been reset */
+
+	/* Disable and route to host */
+	if (GRAPHICS_VER(gt->i915) >= 12)
+		intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
 }
 
 static bool __guc_submission_supported(struct intel_guc *guc)
@@ -5163,4 +5153,5 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_guc.c"
 #include "selftest_guc_multi_lrc.c"
+#include "selftest_guc_hangcheck.c"
 #endif
@@ -53,7 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
 * firmware as TGL.
 */
 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
-	fw_def(DG2, 0, guc_def(dg2, 70, 1, 2)) \
+	fw_def(DG2, 0, guc_def(dg2, 70, 4, 1)) \
 	fw_def(ALDERLAKE_P, 0, guc_def(adlp, 70, 1, 1)) \
 	fw_def(ALDERLAKE_S, 0, guc_def(tgl, 70, 1, 1)) \
 	fw_def(DG1, 0, guc_def(dg1, 70, 1, 1)) \
@@ -220,11 +220,11 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
 		    fw_blobs[i].rev < fw_blobs[i - 1].rev)
 			continue;
 
-		pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
-		       intel_platform_name(fw_blobs[i - 1].p),
-		       fw_blobs[i - 1].rev,
-		       intel_platform_name(fw_blobs[i].p),
-		       fw_blobs[i].rev);
+		drm_err(&i915->drm, "Invalid FW blob order: %s r%u comes before %s r%u\n",
+			intel_platform_name(fw_blobs[i - 1].p),
+			fw_blobs[i - 1].rev,
+			intel_platform_name(fw_blobs[i].p),
+			fw_blobs[i].rev);
 
 		uc_fw->path = NULL;
 	}
@@ -62,7 +62,7 @@ static int intel_guc_scrub_ctbs(void *arg)
 		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
-			pr_err("Failed to create context, %d: %d\n", i, ret);
+			drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
 			goto err;
 		}
 
@@ -83,7 +83,7 @@ static int intel_guc_scrub_ctbs(void *arg)
 
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
-			pr_err("Failed to create request, %d: %d\n", i, ret);
+			drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
 			goto err;
 		}
 
@@ -93,7 +93,7 @@ static int intel_guc_scrub_ctbs(void *arg)
 	for (i = 0; i < 3; ++i) {
 		ret = i915_request_wait(last[i], 0, HZ);
 		if (ret < 0) {
-			pr_err("Last request failed to complete: %d\n", ret);
+			drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
 			goto err;
 		}
 		i915_request_put(last[i]);
@@ -110,7 +110,7 @@ static int intel_guc_scrub_ctbs(void *arg)
 	/* GT will not idle if G2H are lost */
 	ret = intel_gt_wait_for_idle(gt, HZ);
 	if (ret < 0) {
-		pr_err("GT failed to idle: %d\n", ret);
+		drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
 		goto err;
 	}
 
@@ -150,7 +150,7 @@ static int intel_guc_steal_guc_ids(void *arg)
 
 	ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
 	if (!ce) {
-		pr_err("Context array allocation failed\n");
+		drm_err(&gt->i915->drm, "Context array allocation failed\n");
 		return -ENOMEM;
 	}
 
@@ -164,24 +164,24 @@ static int intel_guc_steal_guc_ids(void *arg)
 	if (IS_ERR(ce[context_index])) {
 		ret = PTR_ERR(ce[context_index]);
 		ce[context_index] = NULL;
-		pr_err("Failed to create context: %d\n", ret);
+		drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
 		goto err_wakeref;
 	}
 	ret = igt_spinner_init(&spin, engine->gt);
 	if (ret) {
-		pr_err("Failed to create spinner: %d\n", ret);
+		drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
 		goto err_contexts;
 	}
 	spin_rq = igt_spinner_create_request(&spin, ce[context_index],
 					     MI_ARB_CHECK);
 	if (IS_ERR(spin_rq)) {
 		ret = PTR_ERR(spin_rq);
-		pr_err("Failed to create spinner request: %d\n", ret);
+		drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
 		goto err_contexts;
 	}
 	ret = request_add_spin(spin_rq, &spin);
 	if (ret) {
-		pr_err("Failed to add Spinner request: %d\n", ret);
+		drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
 		goto err_spin_rq;
 	}
 
@@ -191,7 +191,7 @@ static int intel_guc_steal_guc_ids(void *arg)
 		if (IS_ERR(ce[context_index])) {
 			ret = PTR_ERR(ce[context_index--]);
 			ce[context_index] = NULL;
-			pr_err("Failed to create context: %d\n", ret);
+			drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
 			goto err_spin_rq;
 		}
 
@@ -200,8 +200,8 @@ static int intel_guc_steal_guc_ids(void *arg)
 			ret = PTR_ERR(rq);
 			rq = NULL;
 			if (ret != -EAGAIN) {
-				pr_err("Failed to create request, %d: %d\n",
-				       context_index, ret);
+				drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
+					context_index, ret);
 				goto err_spin_rq;
 			}
 		} else {
@@ -215,7 +215,7 @@ static int intel_guc_steal_guc_ids(void *arg)
 	igt_spinner_end(&spin);
 	ret = intel_selftest_wait_for_rq(spin_rq);
 	if (ret) {
-		pr_err("Spin request failed to complete: %d\n", ret);
+		drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
 		i915_request_put(last);
 		goto err_spin_rq;
 	}
@@ -227,7 +227,7 @@ static int intel_guc_steal_guc_ids(void *arg)
 	ret = i915_request_wait(last, 0, HZ * 30);
 	i915_request_put(last);
 	if (ret < 0) {
-		pr_err("Last request failed to complete: %d\n", ret);
+		drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
 		goto err_spin_rq;
 	}
 
@@ -235,7 +235,7 @@ static int intel_guc_steal_guc_ids(void *arg)
 	rq = nop_user_request(ce[context_index], NULL);
 	if (IS_ERR(rq)) {
 		ret = PTR_ERR(rq);
-		pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
+		drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
 		goto err_spin_rq;
 	}
 
@@ -243,21 +243,20 @@ static int intel_guc_steal_guc_ids(void *arg)
 	ret = i915_request_wait(rq, 0, HZ);
 	i915_request_put(rq);
 	if (ret < 0) {
-		pr_err("Request with stolen guc_id failed to complete: %d\n",
-		       ret);
+		drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
 		goto err_spin_rq;
 	}
 
 	/* Wait for idle */
 	ret = intel_gt_wait_for_idle(gt, HZ * 30);
 	if (ret < 0) {
-		pr_err("GT failed to idle: %d\n", ret);
+		drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
 		goto err_spin_rq;
 	}
 
 	/* Verify a guc_id was stolen */
 	if (guc->number_guc_id_stolen == number_guc_id_stolen) {
-		pr_err("No guc_id was stolen");
+		drm_err(&gt->i915->drm, "No guc_id was stolen");
 		ret = -EINVAL;
 	} else {
 		ret = 0;
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c (new file, 159 lines)

@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+#define BEAT_INTERVAL 100
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	rq = intel_engine_create_kernel_request(engine);
+	if (IS_ERR(rq))
+		return rq;
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	return rq;
+}
+
+static int intel_hang_guc(void *arg)
+{
+	struct intel_gt *gt = arg;
+	int ret = 0;
+	struct i915_gem_context *ctx;
+	struct intel_context *ce;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	intel_wakeref_t wakeref;
+	struct i915_gpu_error *global = &gt->i915->gpu_error;
+	struct intel_engine_cs *engine;
+	unsigned int reset_count;
+	u32 guc_status;
+	u32 old_beat;
+
+	ctx = kernel_context(gt->i915, NULL);
+	if (IS_ERR(ctx)) {
+		drm_err(&gt->i915->drm, "Failed to get kernel context: %ld\n", PTR_ERR(ctx));
+		return PTR_ERR(ctx);
+	}
+
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+	ce = intel_context_create(gt->engine[BCS0]);
+	if (IS_ERR(ce)) {
+		ret = PTR_ERR(ce);
+		drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+		goto err;
+	}
+
+	engine = ce->engine;
+	reset_count = i915_reset_count(global);
+
+	old_beat = engine->props.heartbeat_interval_ms;
+	ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
+	if (ret) {
+		drm_err(&gt->i915->drm, "Failed to boost heartbeat interval: %d\n", ret);
+		goto err;
+	}
+
+	ret = igt_spinner_init(&spin, engine->gt);
+	if (ret) {
+		drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+		goto err;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+	intel_context_put(ce);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+		goto err_spin;
+	}
+
+	ret = request_add_spin(rq, &spin);
+	if (ret) {
+		i915_request_put(rq);
+		drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+		goto err_spin;
+	}
+
+	ret = intel_reset_guc(gt);
+	if (ret) {
+		i915_request_put(rq);
+		drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+		goto err_spin;
+	}
+
+	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+	if (!(guc_status & GS_MIA_IN_RESET)) {
+		i915_request_put(rq);
+		drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+		ret = -EIO;
+		goto err_spin;
+	}
+
+	/* Wait for the heartbeat to cause a reset */
+	ret = intel_selftest_wait_for_rq(rq);
+	i915_request_put(rq);
+	if (ret) {
+		drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+		goto err_spin;
+	}
+
+	if (i915_reset_count(global) == reset_count) {
+		drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+		ret = -EINVAL;
+		goto err_spin;
+	}
+
+err_spin:
+	igt_spinner_end(&spin);
+	igt_spinner_fini(&spin);
+	intel_engine_set_heartbeat(engine, old_beat);
+
+	if (ret == 0) {
+		rq = nop_request(engine);
+		if (IS_ERR(rq)) {
+			ret = PTR_ERR(rq);
+			goto err;
+		}
+
+		ret = intel_selftest_wait_for_rq(rq);
+		i915_request_put(rq);
+		if (ret) {
+			drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+			goto err;
+		}
+	}
+
+err:
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+	kernel_context_close(ctx);
+
+	return ret;
+}
+
+int intel_guc_hang_check(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(intel_hang_guc),
+	};
+	struct intel_gt *gt = to_gt(i915);
+
+	if (intel_gt_is_wedged(gt))
+		return 0;
+
+	if (!intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	return intel_gt_live_subtests(tests, gt);
+}
@@ -115,30 +115,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
 
 	parent = multi_lrc_create_parent(gt, class, 0);
 	if (IS_ERR(parent)) {
-		pr_err("Failed creating contexts: %ld", PTR_ERR(parent));
+		drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
 		return PTR_ERR(parent);
 	} else if (!parent) {
-		pr_debug("Not enough engines in class: %d", class);
+		drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
 		return 0;
 	}
 
 	rq = multi_lrc_nop_request(parent);
 	if (IS_ERR(rq)) {
 		ret = PTR_ERR(rq);
-		pr_err("Failed creating requests: %d", ret);
+		drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
 		goto out;
 	}
 
 	ret = intel_selftest_wait_for_rq(rq);
 	if (ret)
-		pr_err("Failed waiting on request: %d", ret);
+		drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
 
 	i915_request_put(rq);
 
 	if (ret >= 0) {
 		ret = intel_gt_wait_for_idle(gt, HZ * 5);
 		if (ret < 0)
-			pr_err("GT failed to idle: %d\n", ret);
+			drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
 	}
 
 out:
@@ -671,6 +671,18 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
 		   pdev->subsystem_device);
 }
 
+static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
+			      const char *name,
+			      const struct intel_ctb_coredump *ctb)
+{
+	if (!ctb->size)
+		return;
+
+	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
+		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
+		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
+}
+
 static void err_print_uc(struct drm_i915_error_state_buf *m,
 			 const struct intel_uc_coredump *error_uc)
 {
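For reference, with name "Send" the format string above produces a dump line of this shape (all values illustrative, not from a real capture):

	GuC Send CTB: raw: 0x00000001, 0x00000004/00000008, cached: 0x00000004/00000008, desc = 0x00000000, buf = 0x00000080 x 0x00000800

i.e. the raw (descriptor) status plus raw and cached head/tail indices, followed by the descriptor and command-buffer offsets and the buffer size.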
@@ -678,7 +690,12 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
 
 	intel_uc_fw_dump(&error_uc->guc_fw, &p);
 	intel_uc_fw_dump(&error_uc->huc_fw, &p);
-	intel_gpu_error_print_vma(m, NULL, error_uc->guc_log);
+	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
+	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
+	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
+	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
+	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
+	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
 }
 
 static void err_free_sgl(struct scatterlist *sgl)
@@ -720,6 +737,8 @@ static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
 	int i;
 
 	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
+	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+		   gt->clock_frequency, gt->clock_period_ns);
 	err_printf(m, "EIR: 0x%08x\n", gt->eir);
 	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
@@ -851,7 +870,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 	if (error->gt) {
 		bool print_guc_capture = false;
 
-		if (error->gt->uc && error->gt->uc->is_guc_capture)
+		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
 			print_guc_capture = true;
 
 		err_print_gt_display(m, error->gt);
@@ -1006,7 +1025,8 @@ static void cleanup_uc(struct intel_uc_coredump *uc)
 {
 	kfree(uc->guc_fw.path);
 	kfree(uc->huc_fw.path);
-	i915_vma_coredump_free(uc->guc_log);
+	i915_vma_coredump_free(uc->guc.vma_log);
+	i915_vma_coredump_free(uc->guc.vma_ctb);
 
 	kfree(uc);
 }
@@ -1655,6 +1675,23 @@ gt_record_engines(struct intel_gt_coredump *gt,
 	}
 }
 
+static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
+			      const struct intel_guc_ct_buffer *ctb,
+			      const void *blob_ptr, struct intel_guc *guc)
+{
+	if (!ctb || !ctb->desc)
+		return;
+
+	saved->raw_status = ctb->desc->status;
+	saved->raw_head = ctb->desc->head;
+	saved->raw_tail = ctb->desc->tail;
+	saved->head = ctb->head;
+	saved->tail = ctb->tail;
+	saved->size = ctb->size;
+	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
+	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
+}
+
 static struct intel_uc_coredump *
 gt_record_uc(struct intel_gt_coredump *gt,
 	     struct i915_vma_compress *compress)
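Note that gt_record_uc() below passes the send descriptor as blob_ptr for the send and recv buffers alike, so desc_offset and cmds_offset end up as byte offsets from that common base. A minimal decode sketch under that assumption (saved_ctb_desc is hypothetical, not part of the patch; struct guc_ct_buffer_desc is the real descriptor type):

	/* Recover a saved CTB descriptor from the dumped CT blob,
	 * assuming the dump starts at the base used for the offsets. */
	static const struct guc_ct_buffer_desc *
	saved_ctb_desc(const void *blob, const struct intel_ctb_coredump *saved)
	{
		return blob + saved->desc_offset;
	}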
@@ -1675,8 +1712,22 @@ gt_record_uc(struct intel_gt_coredump *gt,
 	 */
 	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
 	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
-	error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
-						"GuC log buffer", compress);
+
+	/*
+	 * Save the GuC log and include a timestamp reference for converting the
+	 * log times to system times (in conjunction with the error->boottime and
+	 * gt->clock_frequency fields saved elsewhere).
+	 */
+	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
+	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
+						    "GuC log buffer", compress);
+	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
+						    "GuC CT buffer", compress);
+	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
+	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
+			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
+	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
+			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
 
 	return error_uc;
 }
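The saved GUCPMTIMESTAMP value ticks at the CS timestamp frequency that gt_record_info() snapshots in the next hunk, which is what lets tools rebase GuC log times onto kernel time. A rough conversion sketch (guc_ticks_to_ns is hypothetical; the assumption is that the GuC timestamp advances at gt->clock_frequency):

	static u64 guc_ticks_to_ns(u32 ticks, u32 clock_frequency)
	{
		/* Widen before scaling: ticks * NSEC_PER_SEC overflows 32 bits. */
		return div_u64(mul_u32_u32(ticks, NSEC_PER_SEC), clock_frequency);
	}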
@@ -1833,6 +1884,8 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
 static void gt_record_info(struct intel_gt_coredump *gt)
 {
 	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
+	gt->clock_frequency = gt->_gt->clock_frequency;
+	gt->clock_period_ns = gt->_gt->clock_period_ns;
 }
 
 /*
@@ -2027,9 +2080,9 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
 		error->gt->uc = gt_record_uc(error->gt, compress);
 		if (error->gt->uc) {
 			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
-				error->gt->uc->is_guc_capture = true;
+				error->gt->uc->guc.is_guc_capture = true;
 			else
-				GEM_BUG_ON(error->gt->uc->is_guc_capture);
+				GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
 		}
 	}
 
@@ -125,6 +125,15 @@ struct intel_engine_coredump {
 	struct intel_engine_coredump *next;
 };
 
+struct intel_ctb_coredump {
+	u32 raw_head, head;
+	u32 raw_tail, tail;
+	u32 raw_status;
+	u32 desc_offset;
+	u32 cmds_offset;
+	u32 size;
+};
+
 struct intel_gt_coredump {
 	const struct intel_gt *_gt;
 	bool awake;
@@ -150,6 +159,8 @@ struct intel_gt_coredump {
 	u32 gtt_cache;
 	u32 aux_err; /* gen12 */
 	u32 gam_done; /* gen12 */
+	u32 clock_frequency;
+	u32 clock_period_ns;
 
 	/* Display related */
 	u32 derrmr;
@@ -163,8 +174,14 @@ struct intel_gt_coredump {
 	struct intel_uc_coredump {
 		struct intel_uc_fw guc_fw;
 		struct intel_uc_fw huc_fw;
-		struct i915_vma_coredump *guc_log;
-		bool is_guc_capture;
+		struct guc_info {
+			struct intel_ctb_coredump ctb[2];
+			struct i915_vma_coredump *vma_ctb;
+			struct i915_vma_coredump *vma_log;
+			u32 timestamp;
+			u16 last_fence;
+			bool is_guc_capture;
+		} guc;
 	} *uc;
 
 	struct intel_gt_coredump *next;
@@ -216,6 +216,10 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
 /**
  * i915_vma_resource_unbind - Unbind a vma resource
  * @vma_res: The vma resource to unbind.
+ * @tlb: pointer to vma->obj->mm.tlb associated with the resource
+ *	 to be stored at vma_res->tlb. When not-NULL, it will be used
+ *	 to do TLB cache invalidation before freeing a VMA resource.
+ *	 Used only for async unbind.
  *
  * At this point this function does little more than publish a fence that
  * signals immediately unless signaling is held back.
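A hedged usage sketch of the two documented modes (vma_res and vma stand in for whatever the caller holds; the single-u32 obj->mm.tlb field is an assumption of this kernel era):

	/* Async unbind: stash the TLB slot so invalidation runs before
	 * the resource is freed, once the unbind fence signals. */
	i915_vma_resource_unbind(vma_res, &vma->obj->mm.tlb);

	/* No TLB bookkeeping wanted: pass NULL. */
	i915_vma_resource_unbind(vma_res, NULL);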
@@ -49,5 +49,6 @@ selftest(perf, i915_perf_live_selftests)
 selftest(slpc, intel_slpc_live_selftests)
 selftest(guc, intel_guc_live_selftests)
 selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
+selftest(guc_hang, intel_guc_hang_check)
 /* Here be dragons: keep last to run last! */
 selftest(late_gt_pm, intel_gt_pm_late_selftests)
@@ -971,7 +971,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	if (err)
 		goto err;
 
-	/* Force the wait wait now to avoid including it in the benchmark */
+	/* Force the wait now to avoid including it in the benchmark */
 	err = i915_vma_sync(vma);
 	if (err)
 		goto err_pin;