Merge tag 'drm-intel-gt-next-2023-08-04' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Driver Changes:

- Avoid infinite GPU waits by avoiding premature release of request's
  reusable memory (Chris, Janusz)
- Expose RPS thresholds in sysfs (Tvrtko)
- Apply GuC SLPC min frequency softlimit correctly (Vinay)
- Restore SLPC efficient freq earlier (Vinay)
- Consider OA buffer boundary when zeroing out reports (Umesh)
- Extend Wa_14015795083 to TGL, RKL, DG1 and ADL (Matt R)
- Fix context workarounds with non-masked regs on MTL/DG2 (Lucas)
- Enable the CCS_FLUSH bit in the pipe control and in the CS for MTL+ (Andi)
- Update MTL workarounds 14018778641, 22016122933 (Tejas, Zhanjun)
- Ensure memory quiesced before AUX CCS invalidation (Jonathan)

- Add a gsc_info debugfs (Daniele)
- Invalidate the TLBs on each GT of a multi-GT device (Chris)
- Fix a VMA UAF on multi-GT platforms (Nirmoy)
- Do not use stolen on MTL due to HW bug (Nirmoy)
- Check HuC and GuC version compatibility on MTL (Daniele)
- Dump perf_limit_reasons for slow GuC init debug (Vinay)
- Replace kmap() with kmap_local_page() (Sumitra, Ira)
- Add sentinel to xehp_oa_b_counters for KASAN (Andrzej)
- Add the gen12_needs_ccs_aux_inv helper (Andi)
- Fixes and updates for GSC memory allocation (Daniele)
- Fix one wrong caching mode enum usage (Tvrtko)
- Fixes for GSC wakeref (Alan)

- Static checker fixes (Harshit, Arnd, Dan, Christophe, David, Andi)
- Rename flags with bit_group_X according to the datasheet (Andi)
- Use direct alias for i915 in requests (Andrzej)
- Replace i915->gt0 with to_gt(i915) (Andi)
- Use the i915_vma_flush_writes helper (Tvrtko)
- Selftest improvements (Alan)
- Remove dead code (Tvrtko)

Signed-off-by: Dave Airlie <airlied@redhat.com>

# Conflicts:
#	drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZMy6kDd9npweR4uy@jlahtine-mobl.ger.corp.intel.com
Committed by Dave Airlie on 2023-08-07 13:49:24 +10:00 (commit d9aa1da9a8); 65 changed files with 1529 additions and 531 deletions.


@ -132,6 +132,7 @@ gt-y += \
gt/intel_sseu.o \
gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
gt/intel_tlb.o \
gt/intel_wopcm.o \
gt/intel_workarounds.o \
gt/shmem_utils.o \
@ -197,7 +198,8 @@ i915-y += \
gt/uc/intel_gsc_fw.o \
gt/uc/intel_gsc_proxy.o \
gt/uc/intel_gsc_uc.o \
gt/uc/intel_gsc_uc_heci_cmd_submit.o\
gt/uc/intel_gsc_uc_debugfs.o \
gt/uc/intel_gsc_uc_heci_cmd_submit.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_capture.o \


@ -68,10 +68,8 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_unset_ggtt_write(vma))
intel_gt_flush_ggtt_writes(vma->vm->gt);
}
for_each_ggtt_vma(vma, obj)
i915_vma_flush_writes(vma);
spin_unlock(&obj->vma.lock);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);


@ -2229,8 +2229,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) {
drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@ -2691,6 +2691,7 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
struct intel_gt *gt;
unsigned int idx;
int err;
@ -2714,10 +2715,17 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
intel_gt_pm_get(ce->engine->gt);
intel_gt_pm_get(gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@ -2756,7 +2764,10 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
intel_gt_pm_put(ce->engine->gt);
if (gt->info.id)
intel_gt_pm_put(to_gt(gt->i915));
intel_gt_pm_put(gt);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@ -2769,6 +2780,12 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
/*
* This works in conjunction with eb_select_engine() to prevent
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
intel_gt_pm_put(to_gt(eb->gt->i915));
intel_gt_pm_put(eb->gt);
for_each_child(eb->context, child)
intel_context_put(child);
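The execbuffer change above pairs every power reference taken on a non-root GT with an extra reference on the root GT, so that i915_vma_parked() on GT0 cannot reap VMAs while the ioctl is still validating them. A minimal sketch of that pairing; the helper names are illustrative, not the driver's:

/* Sketch only: eb_root_pm_get/put are illustrative names. */
static void eb_root_pm_get(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);
	/* A non-root GT (gt->info.id != 0) also pins the root GT so
	 * that i915_vma_parked() cannot run while this ref is held. */
	if (gt->info.id)
		intel_gt_pm_get(to_gt(gt->i915));
}

static void eb_root_pm_put(struct intel_gt *gt)
{
	/* Release in the reverse order of eb_root_pm_get(). */
	if (gt->info.id)
		intel_gt_pm_put(to_gt(gt->i915));
	intel_gt_pm_put(gt);
}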


@ -17,6 +17,8 @@
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "gt/intel_gt_defines.h"
struct drm_i915_gem_object;
struct intel_fronbuffer;
struct intel_memory_region;
@ -675,7 +677,7 @@ struct drm_i915_gem_object {
*/
bool dirty:1;
u32 tlb;
u32 tlb[I915_MAX_GT];
} mm;
struct {


@ -7,7 +7,7 @@
#include <drm/drm_cache.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_tlb.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
@ -193,13 +193,16 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_gt *gt = to_gt(i915);
struct intel_gt *gt;
int id;
if (!obj->mm.tlb)
return;
for_each_gt(gt, i915, id) {
if (!obj->mm.tlb[id])
return;
intel_gt_invalidate_tlb(gt, obj->mm.tlb);
obj->mm.tlb = 0;
intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
obj->mm.tlb[id] = 0;
}
}
struct sg_table *
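The loop above is the consumer half of the new per-GT tracking. The producer half runs at unbind time (vma_invalidate_tlb() in the driver), stamping a pending-invalidation seqno into the slot of every GT whose TLBs may still reference the pages; a simplified sketch:

/* Simplified producer side of obj->mm.tlb[]: record, per GT, the
 * full-invalidation seqno that must pass before the pages are freed. */
static void stamp_tlb_dirty(struct drm_i915_private *i915, u32 *tlb)
{
	struct intel_gt *gt;
	int id;

	for_each_gt(gt, i915, id)
		WRITE_ONCE(tlb[id], intel_gt_next_invalidate_tlb_full(gt));
}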


@ -892,7 +892,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
} else {
resource_size_t lmem_range;
lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
lmem_size *= SZ_1G;
}


@ -1246,8 +1246,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
* times in succession a possibility by enlarging the permutation array.
*/
order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
if (!order) {
err = -ENOMEM;
goto out;
}
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64(max - size, max_page_size);


@ -76,7 +76,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5)
cmd |= MI_INVALIDATE_ISP;
}


@ -39,11 +39,11 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
if (GRAPHICS_VER(rq->engine->i915) == 9)
if (GRAPHICS_VER(rq->i915) == 9)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))
if (IS_KBL_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
dc_flush_wa = true;
}
@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
u32 gsi_offset = gt->uncore->gsi_offset;
switch (engine->id) {
case RCS0:
return GEN12_CCS_AUX_INV;
case BCS0:
return GEN12_BCS0_AUX_INV;
case VCS0:
return GEN12_VD0_AUX_INV;
case VCS2:
return GEN12_VD2_AUX_INV;
case VECS0:
return GEN12_VE0_AUX_INV;
case CCS0:
return GEN12_CCS0_AUX_INV;
default:
return INVALID_MMIO_REG;
}
}
static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
i915_reg_t reg = gen12_get_aux_inv_reg(engine);
if (IS_PONTEVECCHIO(engine->i915))
return false;
/*
* So far platforms supported by i915 having flat ccs do not require
* AUX invalidation. Check also whether the engine requires it.
*/
return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
}
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
{
i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
u32 gsi_offset = engine->gt->uncore->gsi_offset;
if (!gen12_needs_ccs_aux_inv(engine))
return cs;
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
MI_SEMAPHORE_REGISTER_POLL |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = 0;
*cs++ = 0;
return cs;
}
@ -180,8 +226,8 @@ u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
/* Wa_14016712196 */
if (IS_MTL_GRAPHICS_STEP(rq->engine->i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(rq->engine->i915, P, STEP_A0, STEP_B0)) {
if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
u32 *cs;
/* dummy PIPE_CONTROL + depth flush */
@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
struct intel_engine_cs *engine = rq->engine;
if (mode & EMIT_FLUSH) {
u32 flags = 0;
/*
* On Aux CCS platforms the invalidation of the Aux
* table requires quiescing memory traffic beforehand
*/
if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
u32 bit_group_0 = 0;
u32 bit_group_1 = 0;
int err;
u32 *cs;
@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (err)
return err;
flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
flags |= PIPE_CONTROL_FLUSH_L3;
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
/*
* When required, in MTL and beyond platforms we
* need to set the CCS_FLUSH bit in the pipe control
*/
if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl,adl-p */
flags |= PIPE_CONTROL_DEPTH_STALL;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
flags |= PIPE_CONTROL_QW_WRITE;
bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
bit_group_1 |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_CS_STALL;
bit_group_1 |= PIPE_CONTROL_CS_STALL;
if (!HAS_3D_PIPELINE(engine->i915))
flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
else if (engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = gen12_emit_pipe_control(cs,
PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
flags, LRC_PPHWSP_SCRATCH_ADDR);
cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(rq, cs);
}
@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
else if (engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
if (!HAS_FLAT_CCS(rq->engine->i915))
count = 8 + 4;
else
count = 8;
count = 8;
if (gen12_needs_ccs_aux_inv(rq->engine))
count += 8;
cs = intel_ring_begin(rq, count);
if (IS_ERR(cs))
@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
if (!HAS_FLAT_CCS(rq->engine->i915)) {
/* hsdes: 1809175790 */
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_GFX_CCS_AUX_NV);
}
cs = gen12_emit_aux_table_inv(engine, cs);
*cs++ = preparser_disable(false);
intel_ring_advance(rq, cs);
@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
intel_engine_mask_t aux_inv = 0;
u32 cmd, *cs;
u32 cmd = 4;
u32 *cs;
cmd = 4;
if (mode & EMIT_INVALIDATE) {
cmd += 2;
if (!HAS_FLAT_CCS(rq->engine->i915) &&
(rq->engine->class == VIDEO_DECODE_CLASS ||
rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
aux_inv = rq->engine->mask &
~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
if (aux_inv)
cmd += 4;
}
if (gen12_needs_ccs_aux_inv(rq->engine))
cmd += 8;
}
cs = intel_ring_begin(rq, cmd);
@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
cmd |= MI_INVALIDATE_TLB;
if (rq->engine->class == VIDEO_DECODE_CLASS)
cmd |= MI_INVALIDATE_BSD;
if (gen12_needs_ccs_aux_inv(rq->engine) &&
rq->engine->class == COPY_ENGINE_CLASS)
cmd |= MI_FLUSH_DW_CCS;
}
*cs++ = cmd;
@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
if (aux_inv) { /* hsdes: 1809175790 */
if (rq->engine->class == VIDEO_DECODE_CLASS)
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_VD0_AUX_NV);
else
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_VE0_AUX_NV);
}
cs = gen12_emit_aux_table_inv(rq->engine, cs);
if (mode & EMIT_INVALIDATE)
*cs++ = preparser_disable(false);
@ -754,7 +798,7 @@ u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
struct drm_i915_private *i915 = rq->engine->i915;
struct drm_i915_private *i915 = rq->i915;
u32 flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TLB_INVALIDATE |
PIPE_CONTROL_TILE_CACHE_FLUSH |
@ -775,7 +819,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
if (!HAS_3D_PIPELINE(rq->engine->i915))
if (!HAS_3D_PIPELINE(rq->i915))
flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
else if (rq->engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
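With the rework above, gen12_emit_aux_table_inv() costs a fixed eight dwords whenever it emits anything, which is what the callers account for with count += 8 / cmd += 8: three dwords for the LRI that sets AUX_INV and five for the semaphore poll that stalls the CS until the HW clears the bit again, i.e. until memory has quiesced and the invalidation completed. An annotated restatement; the helper name is illustrative:

/* Illustrative restatement of the eight-dword sequence above. */
static u32 *emit_aux_inv_and_wait(u32 *cs, u32 reg_offset)
{
	/* 3 dwords: trigger the invalidation by writing AUX_INV. */
	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = reg_offset;
	*cs++ = AUX_INV;

	/* 5 dwords: stall until the register polls back as 0, i.e.
	 * the HW has finished invalidating the AUX table. */
	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_REGISTER_POLL |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;		/* semaphore data: compare against 0 */
	*cs++ = reg_offset;	/* poll the same AUX_INV register */
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}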


@ -13,6 +13,7 @@
#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
struct intel_engine_cs;
struct intel_gt;
struct i915_request;
@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,
u32 bit_group_1, u32 offset)
{
memset(batch, 0, 6 * sizeof(u32));
batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
batch[1] = flags1;
batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
batch[1] = bit_group_1;
batch[2] = offset;
return batch + 6;
}
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
static inline u32 *gen8_emit_pipe_control(u32 *batch,
u32 bit_group_1, u32 offset)
{
return __gen8_emit_pipe_control(batch, 0, flags, offset);
return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);
}
static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,
u32 bit_group_1, u32 offset)
{
return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
return __gen8_emit_pipe_control(batch, bit_group_0,
bit_group_1, offset);
}
static inline u32 *


@ -1333,6 +1333,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
if (!frame)
return -ENOMEM;
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);


@ -2718,7 +2718,7 @@ static int emit_pdps(struct i915_request *rq)
int err, i;
u32 *cs;
GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
GEM_BUG_ON(intel_vgpu_active(rq->i915));
/*
* Beware ye of the dragons, this sequence is magic!


@ -121,6 +121,7 @@
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_REGISTER_POLL (1 << 16)
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
@ -299,6 +300,7 @@
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_CCS_FLUSH (1<<13) /* MTL+ */
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */


@ -33,6 +33,7 @@
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_tlb.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
@ -50,8 +51,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
mutex_init(&gt->tlb.invalidate_lock);
seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_init_tlb(gt);
intel_gt_pm_init_early(gt);
intel_wopcm_init_early(&gt->wopcm);
@ -466,7 +466,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
obj = i915_gem_object_create_lmem(i915, size,
I915_BO_ALLOC_VOLATILE |
I915_BO_ALLOC_GPU_ONLY);
if (IS_ERR(obj))
if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
@ -846,7 +846,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
mutex_destroy(&gt->tlb.invalidate_lock);
intel_gt_fini_tlb(gt);
intel_engines_free(gt);
}
}
@ -887,7 +887,7 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
int intel_gt_probe_all(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_gt *gt = &i915->gt0;
struct intel_gt *gt = to_gt(i915);
const struct intel_gt_definition *gtdef;
phys_addr_t phys_addr;
unsigned int mmio_bar;
@ -1003,137 +1003,3 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
/*
* HW architecture suggest typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
* cap at 1ms. We go a bit higher just in case.
*/
#define TLB_INVAL_TIMEOUT_US 100
#define TLB_INVAL_TIMEOUT_MS 4
/*
* On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
* but are now considered MCR registers. Since they exist within a GAM range,
* the primary instance of the register rolls up the status from each unit.
*/
static int wait_for_invalidate(struct intel_engine_cs *engine)
{
if (engine->tlb_inv.mcr)
return intel_gt_mcr_wait_for_reg(engine->gt,
engine->tlb_inv.reg.mcr_reg,
engine->tlb_inv.done,
0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS);
else
return __intel_wait_for_register_fw(engine->gt->uncore,
engine->tlb_inv.reg.reg,
engine->tlb_inv.done,
0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS,
NULL);
}
static void mmio_invalidate_full(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
unsigned long flags;
if (GRAPHICS_VER(i915) < 8)
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
intel_gt_mcr_lock(gt, &flags);
spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
awake = 0;
for_each_engine(engine, gt, id) {
if (!intel_engine_pm_is_awake(engine))
continue;
if (engine->tlb_inv.mcr)
intel_gt_mcr_multicast_write_fw(gt,
engine->tlb_inv.reg.mcr_reg,
engine->tlb_inv.request);
else
intel_uncore_write_fw(uncore,
engine->tlb_inv.reg.reg,
engine->tlb_inv.request);
awake |= engine->mask;
}
GT_TRACE(gt, "invalidated engines %08x\n", awake);
/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
if (awake &&
(IS_TIGERLAKE(i915) ||
IS_DG1(i915) ||
IS_ROCKETLAKE(i915) ||
IS_ALDERLAKE_S(i915) ||
IS_ALDERLAKE_P(i915)))
intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
if (wait_for_invalidate(engine))
gt_err_ratelimited(gt,
"%s TLB invalidation did not complete in %ums!\n",
engine->name, TLB_INVAL_TIMEOUT_MS);
}
/*
* Use delayed put since a) we mostly expect a flurry of TLB
* invalidations so it is good to avoid paying the forcewake cost and
* b) it works around a bug in Icelake which cannot cope with too rapid
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
}
static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
{
u32 cur = intel_gt_tlb_seqno(gt);
/* Only skip if a *full* TLB invalidate barrier has passed */
return (s32)(cur - ALIGN(seqno, 2)) > 0;
}
void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
{
intel_wakeref_t wakeref;
if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
return;
if (intel_gt_is_wedged(gt))
return;
if (tlb_seqno_passed(gt, seqno))
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
goto unlock;
mmio_invalidate_full(gt);
write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_tlb.c"
#endif
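Besides moving the TLB machinery out to intel_tlb.c, this file picks up Wa_22018444074: the GT scratch allocation keeps its lmem, stolen, internal fallback order, but the stolen step is now skipped on MTL, where a HW bug makes stolen memory unusable for this. The resulting chain as a sketch (function name illustrative):

static struct drm_i915_gem_object *
scratch_obj_fallback(struct drm_i915_private *i915, unsigned int size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	/* Wa_22018444074: never fall back to stolen memory on MTL. */
	if (IS_ERR(obj) && !IS_METEORLAKE(i915))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	return obj;
}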


@ -107,16 +107,4 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
{
return seqprop_sequence(&gt->tlb.seqno);
}
static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
{
return intel_gt_tlb_seqno(gt) | 1;
}
void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
#endif /* __INTEL_GT_H__ */


@ -0,0 +1,11 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_GT_DEFINES__
#define __INTEL_GT_DEFINES__
#define I915_MAX_GT 2
#endif


@ -332,9 +332,11 @@
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x4180)
#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
#define GEN12_VD0_AUX_NV _MMIO(0x4218)
#define GEN12_VD1_AUX_NV _MMIO(0x4228)
#define GEN12_CCS_AUX_INV _MMIO(0x4208)
#define GEN12_VD0_AUX_INV _MMIO(0x4218)
#define GEN12_VE0_AUX_INV _MMIO(0x4238)
#define GEN12_BCS0_AUX_INV _MMIO(0x4248)
#define GEN8_RTCR _MMIO(0x4260)
#define GEN8_M1TCR _MMIO(0x4264)
@ -342,14 +344,12 @@
#define GEN8_BTCR _MMIO(0x426c)
#define GEN8_VTCR _MMIO(0x4270)
#define GEN12_VD2_AUX_NV _MMIO(0x4298)
#define GEN12_VD3_AUX_NV _MMIO(0x42a8)
#define GEN12_VE0_AUX_NV _MMIO(0x4238)
#define BLT_HWS_PGA_GEN7 _MMIO(0x4280)
#define GEN12_VE1_AUX_NV _MMIO(0x42b8)
#define GEN12_VD2_AUX_INV _MMIO(0x4298)
#define GEN12_CCS0_AUX_INV _MMIO(0x42c8)
#define AUX_INV REG_BIT(0)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380)
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)


@ -700,6 +700,80 @@ static const struct attribute *media_perf_power_attrs[] = {
NULL
};
static ssize_t
rps_up_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%u\n", intel_rps_get_up_threshold(rps));
}
static ssize_t
rps_up_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
int ret;
u8 val;
ret = kstrtou8(buf, 10, &val);
if (ret)
return ret;
ret = intel_rps_set_up_threshold(rps, val);
return ret == 0 ? count : ret;
}
static struct kobj_attribute rps_up_threshold_pct =
__ATTR(rps_up_threshold_pct,
0664,
rps_up_threshold_pct_show,
rps_up_threshold_pct_store);
static ssize_t
rps_down_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%u\n", intel_rps_get_down_threshold(rps));
}
static ssize_t
rps_down_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
int ret;
u8 val;
ret = kstrtou8(buf, 10, &val);
if (ret)
return ret;
ret = intel_rps_set_down_threshold(rps, val);
return ret == 0 ? count : ret;
}
static struct kobj_attribute rps_down_threshold_pct =
__ATTR(rps_down_threshold_pct,
0664,
rps_down_threshold_pct_show,
rps_down_threshold_pct_store);
static const struct attribute * const gen6_gt_rps_attrs[] = {
&rps_up_threshold_pct.attr,
&rps_down_threshold_pct.attr,
NULL
};
static ssize_t
default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
@ -722,9 +796,37 @@ default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, cha
static struct kobj_attribute default_max_freq_mhz =
__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
static ssize_t
default_rps_up_threshold_pct_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = kobj_to_gt(kobj->parent);
return sysfs_emit(buf, "%u\n", gt->defaults.rps_up_threshold);
}
static struct kobj_attribute default_rps_up_threshold_pct =
__ATTR(rps_up_threshold_pct, 0444, default_rps_up_threshold_pct_show, NULL);
static ssize_t
default_rps_down_threshold_pct_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = kobj_to_gt(kobj->parent);
return sysfs_emit(buf, "%u\n", gt->defaults.rps_down_threshold);
}
static struct kobj_attribute default_rps_down_threshold_pct =
__ATTR(rps_down_threshold_pct, 0444, default_rps_down_threshold_pct_show, NULL);
static const struct attribute * const rps_defaults_attrs[] = {
&default_min_freq_mhz.attr,
&default_max_freq_mhz.attr,
&default_rps_up_threshold_pct.attr,
&default_rps_down_threshold_pct.attr,
NULL
};
@ -752,6 +854,12 @@ static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj)
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
ret = sysfs_create_file(kobj, vlv_attr);
if (is_object_gt(kobj) && !intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, gen6_gt_rps_attrs);
if (ret)
return ret;
}
return ret;
}
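The new attributes surface under each GT's sysfs directory and accept 0-100 (anything else fails with -EINVAL via rps_set_threshold()). A small userspace example; the card0/gt0 path is an assumption that varies by system, and writing requires the usual sysfs privileges:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/drm/card0/gt/gt0/rps_up_threshold_pct";
	unsigned int pct;
	FILE *f = fopen(path, "r+");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &pct) == 1)
		printf("current up threshold: %u%%\n", pct);

	rewind(f);
	fprintf(f, "90\n");	/* 0-100; out of range gets -EINVAL */
	fclose(f);
	return 0;
}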


@ -83,6 +83,9 @@ enum intel_submission_method {
struct gt_defaults {
u32 min_freq;
u32 max_freq;
u8 rps_up_threshold;
u8 rps_down_threshold;
};
enum intel_gt_type {


@ -1092,8 +1092,15 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
obj = i915_gem_object_create_lmem(engine->i915, context_size,
I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj))
if (IS_ERR(obj)) {
obj = i915_gem_object_create_shmem(engine->i915, context_size);
/*
* Wa_22016122933: For MTL the shared memory needs to be mapped
* as WC on CPU side and UC (PAT index 2) on GPU side
*/
if (IS_METEORLAKE(engine->i915))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
if (IS_ERR(obj))
return ERR_CAST(obj);
@ -1364,10 +1371,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
IS_DG2_G11(ce->engine->i915))
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915))
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_GFX_CCS_AUX_NV);
cs = gen12_emit_aux_table_inv(ce->engine, cs);
/* Wa_16014892111 */
if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
@ -1392,17 +1396,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
0);
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915)) {
if (ce->engine->class == VIDEO_DECODE_CLASS)
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_VD0_AUX_NV);
else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_VE0_AUX_NV);
}
return cs;
return gen12_emit_aux_table_inv(ce->engine, cs);
}
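The shmem fallback in __lrc_alloc_state() gains the Wa_22016122933 fixup: on MTL the GPU maps this memory UC (PAT index 2), so the CPU side must drop cacheability too or the two views go incoherent. The shape of the fallback, as a sketch with an illustrative helper name:

static struct drm_i915_gem_object *
lrc_state_obj(struct intel_engine_cs *engine, u32 context_size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_lmem(engine->i915, context_size,
					  I915_BO_ALLOC_PM_VOLATILE);
	if (IS_ERR(obj)) {
		obj = i915_gem_object_create_shmem(engine->i915,
						   context_size);
		/* Wa_22016122933: match the GPU's UC mapping on MTL. */
		if (!IS_ERR(obj) && IS_METEORLAKE(engine->i915))
			i915_gem_object_set_cache_coherency(obj,
							    I915_CACHE_NONE);
	}
	return obj;
}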
static void


@ -366,7 +366,7 @@ static int emit_pte(struct i915_request *rq,
u64 offset,
int length)
{
bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
bool has_64K_pages = HAS_64K_PAGES(rq->i915);
const u64 encode = rq->context->vm->pte_encode(0, pat_index,
is_lmem ? PTE_LM : 0);
struct intel_ring *ring = rq->ring;
@ -375,7 +375,7 @@ static int emit_pte(struct i915_request *rq,
u32 page_size;
u32 *hdr, *cs;
GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8);
page_size = I915_GTT_PAGE_SIZE;
dword_length = 0x400;
@ -531,7 +531,7 @@ static int emit_copy_ccs(struct i915_request *rq,
u32 dst_offset, u8 dst_access,
u32 src_offset, u8 src_access, int size)
{
struct drm_i915_private *i915 = rq->engine->i915;
struct drm_i915_private *i915 = rq->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
u32 num_ccs_blks;
u32 *cs;
@ -581,7 +581,7 @@ static int emit_copy_ccs(struct i915_request *rq,
static int emit_copy(struct i915_request *rq,
u32 dst_offset, u32 src_offset, int size)
{
const int ver = GRAPHICS_VER(rq->engine->i915);
const int ver = GRAPHICS_VER(rq->i915);
u32 instance = rq->engine->instance;
u32 *cs;
@ -917,7 +917,7 @@ out_ce:
static int emit_clear(struct i915_request *rq, u32 offset, int size,
u32 value, bool is_lmem)
{
struct drm_i915_private *i915 = rq->engine->i915;
struct drm_i915_private *i915 = rq->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
const int ver = GRAPHICS_VER(i915);
int ring_sz;


@ -8,6 +8,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"
@ -210,8 +211,7 @@ void ppgtt_unbind_vma(struct i915_address_space *vm,
return;
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
if (vma_res->tlb)
vma_invalidate_tlb(vm, vma_res->tlb);
vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)


@ -220,7 +220,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
lmem_size *= SZ_1G;


@ -35,9 +35,6 @@
#define RESET_MAX_RETRIES 3
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;


@ -805,7 +805,7 @@ static int mi_set_context(struct i915_request *rq,
static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)


@ -16,7 +16,9 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
@ -672,7 +674,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
{
struct intel_gt *gt = rps_to_gt(rps);
struct intel_uncore *uncore = gt->uncore;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
lockdep_assert_held(&rps->power.mutex);
@ -680,9 +681,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
if (new_power == rps->power.mode)
return;
threshold_up = 95;
threshold_down = 85;
/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
@ -709,17 +707,22 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
GT_TRACE(gt,
"changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
new_power, threshold_up, ei_up, threshold_down, ei_down);
new_power,
rps->power.up_threshold, ei_up,
rps->power.down_threshold, ei_down);
set(uncore, GEN6_RP_UP_EI,
intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
set(uncore, GEN6_RP_UP_THRESHOLD,
intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
intel_gt_ns_to_pm_interval(gt,
ei_up * rps->power.up_threshold * 10));
set(uncore, GEN6_RP_DOWN_EI,
intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
set(uncore, GEN6_RP_DOWN_THRESHOLD,
intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
intel_gt_ns_to_pm_interval(gt,
ei_down *
rps->power.down_threshold * 10));
set(uncore, GEN6_RP_CONTROL,
(GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
@ -731,8 +734,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
skip_hw_write:
rps->power.mode = new_power;
rps->power.up_threshold = threshold_up;
rps->power.down_threshold = threshold_down;
}
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
@ -1559,10 +1560,12 @@ void intel_rps_enable(struct intel_rps *rps)
return;
GT_TRACE(rps_to_gt(rps),
"min:%x, max:%x, freq:[%d, %d]\n",
"min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n",
rps->min_freq, rps->max_freq,
intel_gpu_freq(rps, rps->min_freq),
intel_gpu_freq(rps, rps->max_freq));
intel_gpu_freq(rps, rps->max_freq),
rps->power.up_threshold,
rps->power.down_threshold);
GEM_BUG_ON(rps->max_freq < rps->min_freq);
GEM_BUG_ON(rps->idle_freq > rps->max_freq);
@ -2015,6 +2018,12 @@ void intel_rps_init(struct intel_rps *rps)
}
}
/* Set default thresholds in % */
rps->power.up_threshold = 95;
rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold;
rps->power.down_threshold = 85;
rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold;
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
rps->idle_freq = rps->min_freq;
@ -2569,6 +2578,58 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
return set_min_freq(rps, val);
}
u8 intel_rps_get_up_threshold(struct intel_rps *rps)
{
return rps->power.up_threshold;
}
static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val)
{
int ret;
if (val > 100)
return -EINVAL;
ret = mutex_lock_interruptible(&rps->lock);
if (ret)
return ret;
if (*threshold == val)
goto out_unlock;
*threshold = val;
/* Force reset. */
rps->last_freq = -1;
mutex_lock(&rps->power.mutex);
rps->power.mode = -1;
mutex_unlock(&rps->power.mutex);
intel_rps_set(rps, clamp(rps->cur_freq,
rps->min_freq_softlimit,
rps->max_freq_softlimit));
out_unlock:
mutex_unlock(&rps->lock);
return ret;
}
int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold)
{
return rps_set_threshold(rps, &rps->power.up_threshold, threshold);
}
u8 intel_rps_get_down_threshold(struct intel_rps *rps)
{
return rps->power.down_threshold;
}
int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold)
{
return rps_set_threshold(rps, &rps->power.down_threshold, threshold);
}
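rps_set_power() now takes the percentages from rps->power.up_threshold and down_threshold instead of the hard-coded 95/85, folding them into the evaluation interval before programming the threshold registers. The arithmetic, spelled out (ei_* is in microseconds, as in the code above):

/* ei_us microseconds at threshold_pct percent, in nanoseconds:
 * ei_us * 1000 * pct / 100 == ei_us * pct * 10, which is the "* 10"
 * factor seen in the GEN6_RP_UP/DOWN_THRESHOLD writes. */
static u32 rp_threshold_ns(u32 ei_us, u8 threshold_pct)
{
	return ei_us * threshold_pct * 10;
}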
static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
struct intel_uncore *uncore = rps_to_uncore(rps);


@ -37,6 +37,10 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
int intel_gpu_freq(struct intel_rps *rps, int val);
int intel_freq_opcode(struct intel_rps *rps, int val);
u8 intel_rps_get_up_threshold(struct intel_rps *rps);
int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold);
u8 intel_rps_get_down_threshold(struct intel_rps *rps);
int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);


@ -0,0 +1,159 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_tlb.h"
/*
* HW architecture suggest typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
* cap at 1ms. We go a bit higher just in case.
*/
#define TLB_INVAL_TIMEOUT_US 100
#define TLB_INVAL_TIMEOUT_MS 4
/*
* On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
* but are now considered MCR registers. Since they exist within a GAM range,
* the primary instance of the register rolls up the status from each unit.
*/
static int wait_for_invalidate(struct intel_engine_cs *engine)
{
if (engine->tlb_inv.mcr)
return intel_gt_mcr_wait_for_reg(engine->gt,
engine->tlb_inv.reg.mcr_reg,
engine->tlb_inv.done,
0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS);
else
return __intel_wait_for_register_fw(engine->gt->uncore,
engine->tlb_inv.reg.reg,
engine->tlb_inv.done,
0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS,
NULL);
}
static void mmio_invalidate_full(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
unsigned long flags;
if (GRAPHICS_VER(i915) < 8)
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
intel_gt_mcr_lock(gt, &flags);
spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
awake = 0;
for_each_engine(engine, gt, id) {
if (!intel_engine_pm_is_awake(engine))
continue;
if (engine->tlb_inv.mcr)
intel_gt_mcr_multicast_write_fw(gt,
engine->tlb_inv.reg.mcr_reg,
engine->tlb_inv.request);
else
intel_uncore_write_fw(uncore,
engine->tlb_inv.reg.reg,
engine->tlb_inv.request);
awake |= engine->mask;
}
GT_TRACE(gt, "invalidated engines %08x\n", awake);
/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
if (awake &&
(IS_TIGERLAKE(i915) ||
IS_DG1(i915) ||
IS_ROCKETLAKE(i915) ||
IS_ALDERLAKE_S(i915) ||
IS_ALDERLAKE_P(i915)))
intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
if (wait_for_invalidate(engine))
gt_err_ratelimited(gt,
"%s TLB invalidation did not complete in %ums!\n",
engine->name, TLB_INVAL_TIMEOUT_MS);
}
/*
* Use delayed put since a) we mostly expect a flurry of TLB
* invalidations so it is good to avoid paying the forcewake cost and
* b) it works around a bug in Icelake which cannot cope with too rapid
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
}
static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
{
u32 cur = intel_gt_tlb_seqno(gt);
/* Only skip if a *full* TLB invalidate barrier has passed */
return (s32)(cur - ALIGN(seqno, 2)) > 0;
}
void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
{
intel_wakeref_t wakeref;
if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
return;
if (intel_gt_is_wedged(gt))
return;
if (tlb_seqno_passed(gt, seqno))
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
goto unlock;
mmio_invalidate_full(gt);
write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
void intel_gt_init_tlb(struct intel_gt *gt)
{
mutex_init(&gt->tlb.invalidate_lock);
seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
}
void intel_gt_fini_tlb(struct intel_gt *gt)
{
mutex_destroy(&gt->tlb.invalidate_lock);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_tlb.c"
#endif


@ -0,0 +1,29 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef INTEL_TLB_H
#define INTEL_TLB_H
#include <linux/seqlock.h>
#include <linux/types.h>
#include "intel_gt_types.h"
void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno);
void intel_gt_init_tlb(struct intel_gt *gt);
void intel_gt_fini_tlb(struct intel_gt *gt);
static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
{
return seqprop_sequence(&gt->tlb.seqno);
}
static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
{
return intel_gt_tlb_seqno(gt) | 1;
}
#endif /* INTEL_TLB_H */
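A sketch of one dirty/flush round trip through these helpers, per GT. Producers take the next odd ("pending") seqno at unbind; the consumer either performs the MMIO invalidation or skips it when a later full barrier has already passed, since ALIGN(seqno, 2) pairs the odd pending value with the even seqno bumped by write_seqcount_invalidate():

/* Sketch: one unbind-to-free round trip for a single GT slot. */
static void example_tlb_round_trip(struct intel_gt *gt, u32 *slot)
{
	/* Unbind side: mark the invalidation as pending (seqno | 1). */
	WRITE_ONCE(*slot, intel_gt_next_invalidate_tlb_full(gt));

	/* Free side: invalidate, or skip if already covered; the MMIO
	 * path thus runs at most once per batch of unbinds. */
	intel_gt_invalidate_tlb_full(gt, READ_ONCE(*slot));
	WRITE_ONCE(*slot, 0);
}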


@ -123,6 +123,22 @@ static void wa_init_finish(struct i915_wa_list *wal)
wal->wa_count, wal->name, wal->engine_name);
}
static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
fw |= intel_uncore_forcewake_for_reg(uncore,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
return fw;
}
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
unsigned int addr = i915_mmio_reg_offset(wa->reg);
@ -225,13 +241,13 @@ static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
wa_add(wal, reg, clear, set, clear, false);
wa_add(wal, reg, clear, set, clear | set, false);
}
static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
wa_mcr_add(wal, reg, clear, set, clear, false);
wa_mcr_add(wal, reg, clear, set, clear | set, false);
}
static void
@ -621,10 +637,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/* Wa_1406697149 (WaDisableBankHangMode:icl) */
wa_write(wal,
GEN8_L3CNTLREG,
intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
GEN8_ERRDETBCTRL);
wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
/* WaForceEnableNonCoherent:icl
* This is not the same workaround as in early Gen9 platforms, where
@ -653,7 +666,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_1604278689:icl,ehl */
wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
0, /* write-only register; skip validation */
0,
0xFFFFFFFF);
/* Wa_1406306137:icl,ehl */
@ -670,38 +683,8 @@ static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
wa_mcr_add(wal,
XEHP_FF_MODE2,
FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128,
0, false);
}
/*
* These settings aren't actually workarounds, but general tuning settings that
* need to be programmed on several platforms.
*/
static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/*
* Although some platforms refer to it as Wa_1604555607, we need to
* program it even on those that don't explicitly list that
* workaround.
*
* Note that the programming of this register is further modified
* according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
* Wa_1608008084 tells us the FF_MODE2 register will return the wrong
* value when read. The default value for this register is zero for all
* fields and there are no bit masks. So instead of doing a RMW we
* should just write TDS timer value. For the same reason read
* verification is ignored.
*/
wa_add(wal,
GEN12_FF_MODE2,
FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128,
0, false);
wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128);
}
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
@ -709,8 +692,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
{
struct drm_i915_private *i915 = engine->i915;
gen12_ctx_gt_tuning_init(engine, wal);
/*
* Wa_1409142259:tgl,dg1,adl-p
* Wa_1409347922:tgl,dg1,adl-p
@ -732,15 +713,27 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/*
* Wa_16011163337
* Wa_16011163337 - GS_TIMER
*
* Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
* to Wa_1608008084.
* TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
* need to program it even on those that don't explicitly list that
* workaround.
*
* Note that the programming of GEN12_FF_MODE2 is further modified
* according to the FF_MODE2 guidance given by Wa_1608008084.
* Wa_1608008084 tells us the FF_MODE2 register will return the wrong
* value when read from the CPU.
*
* The default value for this register is zero for all fields.
* So instead of doing a RMW we should just write the desired values
* for TDS and GS timers. Note that since the readback can't be trusted,
* the clear mask is just set to ~0 to make sure other bits are not
* inadvertently set. For the same reason read verification is ignored.
*/
wa_add(wal,
GEN12_FF_MODE2,
FF_MODE2_GS_TIMER_MASK,
FF_MODE2_GS_TIMER_224,
~0,
FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
0, false);
if (!IS_DG1(i915)) {
@ -987,6 +980,9 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
struct intel_uncore *uncore = rq->engine->uncore;
enum forcewake_domains fw;
unsigned long flags;
struct i915_wa *wa;
unsigned int i;
u32 *cs;
@ -1003,13 +999,36 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
if (IS_ERR(cs))
return PTR_ERR(cs);
fw = wal_get_fw_for_rmw(uncore, wal);
intel_gt_mcr_lock(wal->gt, &flags);
spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
u32 val;
/* Skip reading the register if it's not really needed */
if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
val = wa->set;
} else {
val = wa->is_mcr ?
intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
intel_uncore_read_fw(uncore, wa->reg);
val &= ~wa->clr;
val |= wa->set;
}
*cs++ = i915_mmio_reg_offset(wa->reg);
*cs++ = wa->set;
*cs++ = val;
}
*cs++ = MI_NOOP;
intel_uncore_forcewake_put__locked(uncore, fw);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(wal->gt, flags);
intel_ring_advance(rq, cs);
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
@ -1485,6 +1504,18 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
/*
* Wa_14015795083
*
* Firmware on some gen12 platforms locks the MISCCPCTL register,
* preventing i915 from modifying it for this workaround. Skip the
* readback verification for this workaround on debug builds; if the
* workaround doesn't stick due to firmware behavior, it's not an error
* that we want CI to flag.
*/
wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
0, 0, false);
}
static void
@ -1710,7 +1741,6 @@ static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018778641 / Wa_18018781329 */
wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@ -1743,8 +1773,6 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
* GT, the media GT's versions are regular singleton registers.
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
debug_dump_steering(gt);
}
@ -1850,22 +1878,6 @@ void intel_gt_init_workarounds(struct intel_gt *gt)
wa_init_finish(wal);
}
static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
fw |= intel_uncore_forcewake_for_reg(uncore,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
return fw;
}
static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
const char *name, const char *from)
@ -3237,7 +3249,7 @@ wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
struct drm_i915_private *i915 = rq->engine->i915;
struct drm_i915_private *i915 = rq->i915;
unsigned int i, count = 0;
const struct i915_wa *wa;
u32 srm, *cs;
@ -3336,7 +3348,7 @@ retry:
err = 0;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
continue;
if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
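Two related verification changes run through this file: wa_write_clr_set() now checks readback against clear | set rather than clear alone, and intel_engine_emit_ctx_wa() only reads a register back when the workaround genuinely needs a read-modify-write. The skip condition, distilled into a predicate (a sketch, not the driver's helper):

/* When can a context workaround be emitted without an MMIO read? */
static bool wa_needs_mmio_read(const struct i915_wa *wa)
{
	/* Masked registers carry their write-enable mask in the value
	 * itself, and a workaround touching every bit (clr | set ==
	 * U32_MAX) overwrites the old contents anyway. */
	return !wa->masked_reg && (wa->clr | wa->set) != U32_MAX;
}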


@ -62,7 +62,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
if (GRAPHICS_VER(rq->engine->i915) >= 8)
if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));


@ -137,7 +137,7 @@ static int read_mocs_table(struct i915_request *rq,
if (!table)
return 0;
if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
addr = global_mocs_offset() + gt->uncore->gsi_offset;
else
addr = mocs_offset(rq->engine);


@ -140,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
if (GRAPHICS_VER(rq->engine->i915) >= 8)
if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
*cs++ = cmd;


@ -459,12 +459,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
if (IS_ERR(cs))
return PTR_ERR(cs);
if (GRAPHICS_VER(rq->engine->i915) >= 8) {
if (GRAPHICS_VER(rq->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
} else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
} else if (GRAPHICS_VER(rq->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;


@ -6,6 +6,7 @@
#include "i915_selftest.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gen8_engine_cs.h"
@ -354,7 +355,7 @@ out_a:
static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
{
intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
intel_gt_invalidate_tlb_full(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
}
static int invalidate_full(void *arg)


@ -8,6 +8,74 @@
#include <linux/types.h>
struct intel_gsc_version {
u16 major;
u16 minor;
u16 hotfix;
u16 build;
} __packed;
struct intel_gsc_partition {
u32 offset;
u32 size;
} __packed;
struct intel_gsc_layout_pointers {
u8 rom_bypass_vector[16];
/* size of pointers layout not including ROM bypass vector */
u16 size;
/*
* bit0: Backup copy of layout pointers exist
* bits1-15: reserved
*/
u8 flags;
u8 reserved;
u32 crc32;
struct intel_gsc_partition datap;
struct intel_gsc_partition boot1;
struct intel_gsc_partition boot2;
struct intel_gsc_partition boot3;
struct intel_gsc_partition boot4;
struct intel_gsc_partition boot5;
struct intel_gsc_partition temp_pages;
} __packed;
/* Boot partition structures */
struct intel_gsc_bpdt_header {
u32 signature;
#define INTEL_GSC_BPDT_HEADER_SIGNATURE 0x000055AA
u16 descriptor_count; /* num of entries after the header */
u8 version;
u8 configuration;
u32 crc32;
u32 build_version;
struct intel_gsc_version tool_version;
} __packed;
struct intel_gsc_bpdt_entry {
/*
* Bits 0-15: BPDT entry type
* Bits 16-17: reserved
* Bit 18: code sub-partition
* Bits 19-31: reserved
*/
u32 type;
#define INTEL_GSC_BPDT_ENTRY_TYPE_MASK GENMASK(15, 0)
#define INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE 0x1
u32 sub_partition_offset; /* from the base of the BPDT header */
u32 sub_partition_size;
} __packed;
/* Code partition directory (CPD) structures */
struct intel_gsc_cpd_header_v2 {
u32 header_marker;
@ -44,13 +112,6 @@ struct intel_gsc_cpd_entry {
u8 reserved[4];
} __packed;
struct intel_gsc_version {
u16 major;
u16 minor;
u16 hotfix;
u16 build;
} __packed;
struct intel_gsc_manifest_header {
u32 header_type; /* 0x4 for manifest type */
u32 header_length; /* in dwords */


@ -3,48 +3,216 @@
* Copyright © 2022 Intel Corporation
*/
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
#define GSC_FW_STATUS_REG _MMIO(0x116C40)
#define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0)
#define GSC_FW_CURRENT_STATE_RESET 0
#define GSC_FW_PROXY_STATE_NORMAL 5
#define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9)
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "i915_reg.h"
static bool gsc_is_in_reset(struct intel_uncore *uncore)
{
u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
GSC_FW_CURRENT_STATE_RESET;
return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) ==
HECI1_FWSTS1_CURRENT_STATE_RESET;
}
static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore)
static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref)
{
intel_wakeref_t wakeref;
u32 fw_status = 0;
with_intel_runtime_pm(uncore->rpm, wakeref)
fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
if (needs_wakeref)
wakeref = intel_runtime_pm_get(uncore->rpm);
fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
if (needs_wakeref)
intel_runtime_pm_put(uncore->rpm, wakeref);
return fw_status;
}
bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc)
bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref)
{
return REG_FIELD_GET(GSC_FW_CURRENT_STATE,
gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore)) ==
GSC_FW_PROXY_STATE_NORMAL;
return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE,
gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore,
needs_wakeref)) ==
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc)
{
if (!(IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)))
return -ENODEV;
if (!intel_uc_fw_is_loadable(&gsc->fw))
return -ENODEV;
if (__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
return -ENOLINK;
if (!intel_gsc_uc_fw_proxy_init_done(gsc, true))
return -EAGAIN;
return 0;
}
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
{
return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore) & GSC_FW_INIT_COMPLETE_BIT;
return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) &
HECI1_FWSTS1_INIT_COMPLETE;
}
static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry)
{
return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
}
int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
{
struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw);
struct intel_gt *gt = gsc_uc_to_gt(gsc);
const struct intel_gsc_layout_pointers *layout = data;
const struct intel_gsc_bpdt_header *bpdt_header = NULL;
const struct intel_gsc_bpdt_entry *bpdt_entry = NULL;
const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
const struct intel_gsc_cpd_entry *cpd_entry = NULL;
const struct intel_gsc_manifest_header *manifest;
size_t min_size = sizeof(*layout);
int i;
if (size < min_size) {
gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
return -ENODATA;
}
/*
* The GSC binary starts with the pointer layout, which contains the
* locations of the various partitions of the binary. The one we're
* interested in to get the version is the boot1 partition, where we can
* find a BPDT header followed by entries, one of which points to the
* RBE sub-section of the partition. From here, we can parse the CPD
* header and the following entries to find the manifest location
* (entry identified by the "RBEP.man" name), from which we can finally
* extract the version.
*
* --------------------------------------------------
* [ intel_gsc_layout_pointers ]
* [ ... ]
* [ boot1.offset >---------------------------]------o
* [ ... ] |
* -------------------------------------------------- |
* |
* -------------------------------------------------- |
* [ intel_gsc_bpdt_header ]<-----o
* --------------------------------------------------
* [ intel_gsc_bpdt_entry[] ]
* [ entry1 ]
* [ ... ]
* [ entryX ]
* [ type == GSC_RBE ]
* [ offset >-----------------------------]------o
* [ ... ] |
* -------------------------------------------------- |
* |
* -------------------------------------------------- |
* [ intel_gsc_cpd_header_v2 ]<-----o
* --------------------------------------------------
* [ intel_gsc_cpd_entry[] ]
* [ entry1 ]
* [ ... ]
* [ entryX ]
* [ "RBEP.man" ]
* [ ... ]
* [ offset >----------------------------]------o
* [ ... ] |
* -------------------------------------------------- |
* |
* -------------------------------------------------- |
* [ intel_gsc_manifest_header ]<-----o
* [ ... ]
* [ intel_gsc_version fw_version ]
* [ ... ]
* --------------------------------------------------
*/
min_size = layout->boot1.offset + layout->boot1.size;
if (size < min_size) {
gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n",
size, min_size);
return -ENODATA;
}
min_size = sizeof(*bpdt_header);
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
bpdt_header = data + layout->boot1.offset;
if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) {
gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n",
bpdt_header->signature);
return -EINVAL;
}
min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count;
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header);
for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) {
if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) !=
INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE)
continue;
cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset;
min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header);
break;
}
if (!cpd_header) {
gt_err(gt, "couldn't find CPD header in GSC binary!\n");
return -ENODATA;
}
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n",
cpd_header->header_marker);
return -EINVAL;
}
min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries;
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
cpd_entry = (void *)cpd_header + cpd_header->header_length;
for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) {
if (strcmp(cpd_entry->name, "RBEP.man") == 0) {
manifest = (void *)cpd_header + cpd_entry_offset(cpd_entry);
intel_uc_fw_version_from_gsc_manifest(&gsc->release,
manifest);
gsc->security_version = manifest->security_version;
break;
}
}
return 0;
}
static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
@ -115,38 +283,21 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct drm_i915_private *i915 = gt->i915;
void *src;
if (!gsc->local)
return -ENODEV;
if (gsc->local->size < gsc->fw.size)
return -ENOSPC;
src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
i915_coherent_map_type(i915, gsc->fw.obj, true));
if (IS_ERR(src))
return PTR_ERR(src);
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
/*
* Wa_22016122933: Making sure the data in dst is
@ -155,7 +306,6 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
intel_guc_write_barrier(&gt->uc.guc);
i915_gem_object_unpin_map(gsc->fw.obj);
return 0;
}
@ -163,12 +313,94 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
static int gsc_fw_wait(struct intel_gt *gt)
{
return intel_wait_for_register(gt->uncore,
HECI_FWSTS(MTL_GSC_HECI1_BASE, 1),
HECI1_FWSTS1_INIT_COMPLETE,
HECI1_FWSTS1_INIT_COMPLETE,
500);
}
struct intel_gsc_mkhi_header {
u8 group_id;
#define MKHI_GROUP_ID_GFX_SRV 0x30
u8 command;
#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)
u8 reserved;
u8 result;
} __packed;
struct mtl_gsc_ver_msg_in {
struct intel_gsc_mtl_header header;
struct intel_gsc_mkhi_header mkhi;
} __packed;
struct mtl_gsc_ver_msg_out {
struct intel_gsc_mtl_header header;
struct intel_gsc_mkhi_header mkhi;
u16 proj_major;
u16 compat_major;
u16 compat_minor;
u16 reserved[5];
} __packed;
#define GSC_VER_PKT_SZ SZ_4K
static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct mtl_gsc_ver_msg_in *msg_in;
struct mtl_gsc_ver_msg_out *msg_out;
struct i915_vma *vma;
u64 offset;
void *vaddr;
int err;
err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
&vma, &vaddr);
if (err) {
gt_err(gt, "failed to allocate vma for GSC version query\n");
return err;
}
offset = i915_ggtt_offset(vma);
msg_in = vaddr;
msg_out = vaddr + GSC_VER_PKT_SZ;
intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
HECI_MEADDRESS_MKHI,
sizeof(*msg_in), 0);
msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV;
msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;
err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
offset,
sizeof(*msg_in),
offset + GSC_VER_PKT_SZ,
GSC_VER_PKT_SZ);
if (err) {
gt_err(gt,
"failed to submit GSC request for compatibility version: %d\n",
err);
goto out_vma;
}
if (msg_out->header.message_size != sizeof(*msg_out)) {
gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n",
msg_out->header.message_size, sizeof(*msg_out),
msg_out->header.status, msg_out->header.flags, msg_out->mkhi.result);
err = -EPROTO;
goto out_vma;
}
gsc->fw.file_selected.ver.major = msg_out->compat_major;
gsc->fw.file_selected.ver.minor = msg_out->compat_minor;
out_vma:
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
return err;
}
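The query above packs the request and the reply into a single GGTT buffer split into two fixed 4K halves; a sketch of the layout implied by the offsets used (the page-alignment rationale is an assumption, not stated in the code):
/*
 * One VMA of 2 * GSC_VER_PKT_SZ:
 *
 *   i915_ggtt_offset(vma)          offset + GSC_VER_PKT_SZ
 *   |---- msg_in (request) --------|---- msg_out (reply) --------|
 *
 * Both messages are far smaller than 4K; the oversized halves keep
 * the reply area page-aligned and leave headroom for the GSC to write.
 */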
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
@ -226,10 +458,24 @@ int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
if (err)
goto fail;
err = gsc_fw_query_compatibility_version(gsc);
if (err)
goto fail;
/* we only support compatibility version 1.0 at the moment */
err = intel_uc_check_file_version(gsc_fw, NULL);
if (err)
goto fail;
/* FW is not fully operational until we enable SW proxy */
intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n",
gsc_fw->file_selected.path,
gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor,
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build,
gsc->security_version);
return 0;

View File

@ -9,10 +9,13 @@
#include <linux/types.h>
struct intel_gsc_uc;
struct intel_uc_fw;
struct intel_uncore;
int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size);
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc);
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc);
bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref);
int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc);
#endif

View File

@ -7,10 +7,11 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_fw.h"
#include "i915_drv.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "i915_drv.h"
#include "i915_reg.h"
static void gsc_work(struct work_struct *work)
{
@ -61,8 +62,18 @@ static void gsc_work(struct work_struct *work)
}
ret = intel_gsc_proxy_request_handler(gsc);
if (ret) {
if (actions & GSC_ACTION_FW_LOAD) {
/*
* A proxy failure right after firmware load means the proxy-init
* step has failed, so mark GSC as not usable after this
*/
drm_err(&gt->i915->drm,
"GSC proxy handler failed to init\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
}
goto out_put;
}
/* mark the GSC FW init as done the first time we run this */
if (actions & GSC_ACTION_FW_LOAD) {
@ -71,12 +82,13 @@ static void gsc_work(struct work_struct *work)
* complete the request handling cleanly, so we need to check the
* status register to check if the proxy init was actually successful
*/
if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) {
drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
} else {
drm_err(&gt->i915->drm,
"GSC status reports proxy init not complete\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
}
}
}
@ -133,26 +145,85 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
}
}
static int gsc_allocate_and_map_vma(struct intel_gsc_uc *gsc, u32 size)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
void __iomem *vaddr;
int ret = 0;
/*
* The GSC FW doesn't immediately suspend after becoming idle, so there
* is a chance that it could still be awake after we successfully
* return from the pci suspend function, even if there are no pending
* operations.
* The FW might therefore try to access memory for its suspend operation
* after the kernel has completed the HW suspend flow; this can cause
* issues if the FW is mapped in normal RAM memory, as some of the
* involved HW units might've already lost power.
* The driver must therefore avoid this situation and the recommended
* way to do so is to use stolen memory for the GSC memory allocation,
* because stolen memory takes a different path in HW and it is
* guaranteed to always work as long as the GPU itself is awake (which
* it must be if the GSC is awake).
*/
obj = i915_gem_object_create_stolen(gt->i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
vaddr = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto err;
}
i915_vma_make_unshrinkable(vma);
gsc->local = vma;
gsc->local_vaddr = vaddr;
return 0;
err:
i915_gem_object_put(obj);
return ret;
}
static void gsc_unmap_and_free_vma(struct intel_gsc_uc *gsc)
{
struct i915_vma *vma = fetch_and_zero(&gsc->local);
if (!vma)
return;
gsc->local_vaddr = NULL;
i915_vma_unpin_iomap(vma);
i915_gem_object_put(vma->obj);
}
int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
{
static struct lock_class_key gsc_lock;
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct intel_engine_cs *engine = gt->engine[GSC0];
struct intel_context *ce;
int err;
err = intel_uc_fw_init(&gsc->fw);
if (err)
goto out;
err = gsc_allocate_and_map_vma(gsc, SZ_4M);
if (err)
	goto out_fw;
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
I915_GEM_HWS_GSC_ADDR,
@ -173,7 +244,7 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
return 0;
out_vma:
gsc_unmap_and_free_vma(gsc);
out_fw:
intel_uc_fw_fini(&gsc->fw);
out:
@ -197,7 +268,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
if (gsc->ce)
intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
gsc_unmap_and_free_vma(gsc);
intel_uc_fw_fini(&gsc->fw);
}
@ -245,3 +316,45 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
queue_work(gsc->wq, &gsc->work);
}
void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
if (!intel_gsc_uc_is_supported(gsc)) {
drm_printf(p, "GSC not supported\n");
return;
}
if (!intel_gsc_uc_is_wanted(gsc)) {
drm_printf(p, "GSC disabled\n");
return;
}
drm_printf(p, "GSC firmware: %s\n", gsc->fw.file_selected.path);
if (gsc->fw.file_selected.path != gsc->fw.file_wanted.path)
drm_printf(p, "GSC firmware wanted: %s\n", gsc->fw.file_wanted.path);
drm_printf(p, "\tstatus: %s\n", intel_uc_fw_status_repr(gsc->fw.status));
drm_printf(p, "Release: %u.%u.%u.%u\n",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
drm_printf(p, "Compatibility Version: %u.%u [min expected %u.%u]\n",
gsc->fw.file_selected.ver.major, gsc->fw.file_selected.ver.minor,
gsc->fw.file_wanted.ver.major, gsc->fw.file_wanted.ver.minor);
drm_printf(p, "SVN: %u\n", gsc->security_version);
with_intel_runtime_pm(uncore->rpm, wakeref) {
u32 i;
for (i = 1; i <= 6; i++) {
u32 status = intel_uncore_read(uncore,
HECI_FWSTS(MTL_GSC_HECI1_BASE, i));
drm_printf(p, "HECI1 FWSTST%u = 0x%08x\n", i, status);
}
}
}
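For illustration, a gsc_info debugfs dump produced by the printer above would look roughly as follows (firmware path and register values invented):
GSC firmware: i915/mtl_gsc_1.bin
	status: RUNNING
Release: 102.0.10.1655
Compatibility Version: 1.0 [min expected 1.0]
SVN: 2
HECI1 FWSTST1 = 0x00008745
HECI1 FWSTST2 = 0x00000000
...
HECI1 FWSTST6 = 0x00000000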

View File

@ -8,6 +8,7 @@
#include "intel_uc_fw.h"
struct drm_printer;
struct i915_vma;
struct intel_context;
struct i915_gsc_proxy_component;
@ -17,7 +18,26 @@ struct intel_gsc_uc {
struct intel_uc_fw fw;
/* GSC-specific additions */
/*
* The GSC has 3 version numbers:
* - Release version (incremented with each build)
* - Security version (incremented on security fix)
* - Compatibility version (incremented on interface change)
*
* The one we care about to use the binary is the last one, so that's
* the one we save inside the intel_uc_fw structure. The other two
* versions are only used for debug/info purposes, so we save them here.
*
* Note that the release and security versions are available in the
* binary header, while the compatibility version must be queried after
* loading the binary.
*/
struct intel_uc_fw_ver release;
u32 security_version;
struct i915_vma *local; /* private memory for GSC usage */
void __iomem *local_vaddr; /* pointer to access the private memory */
struct intel_context *ce; /* for submission to GSC FW via GSC engine */
/* for delayed load and proxy handling */
@ -44,6 +64,7 @@ void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
void intel_gsc_uc_resume(struct intel_gsc_uc *gsc);
void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc);
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p);
static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
{

View File

@ -0,0 +1,39 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <drm/drm_print.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_debugfs.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_uc_debugfs.h"
#include "i915_drv.h"
static int gsc_info_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
struct intel_gsc_uc *gsc = m->private;
if (!intel_gsc_uc_is_supported(gsc))
return -ENODEV;
intel_gsc_uc_load_status(gsc, &p);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(gsc_info);
void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc_uc, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "gsc_info", &gsc_info_fops, NULL },
};
if (!intel_gsc_uc_is_supported(gsc_uc))
return;
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gsc_uc);
}

View File

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef DEBUGFS_GSC_UC_H
#define DEBUGFS_GSC_UC_H
struct intel_gsc_uc;
struct dentry;
void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc, struct dentry *root);
#endif /* DEBUGFS_GSC_UC_H */

View File

@ -17,6 +17,7 @@ struct intel_gsc_mtl_header {
#define GSC_HECI_VALIDITY_MARKER 0xA578875A
u8 heci_client_id;
#define HECI_MEADDRESS_MKHI 7
#define HECI_MEADDRESS_PROXY 10
#define HECI_MEADDRESS_PXP 17
#define HECI_MEADDRESS_HDCP 18

View File

@ -251,9 +251,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
if (ret == 0)
ret = -ENXIO;
} else if (delta_ms > 200) {
guc_warn(guc, "excessive init time: %lldms! [freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
before_freq, status, count, ret);
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq,
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),

View File

@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
ret = slpc_set_param(slpc,
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val);
if (ret) {
guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
val, ERR_PTR(ret));
} else {
slpc->ignore_eff_freq = val;
/* Set min to RPn when we disable efficient freq */
if (val)
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
slpc->min_freq);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
return ret;
@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
return ret;
if (!slpc->min_freq_softlimit) {
/* Min softlimit is initialized to RPn */
slpc->min_freq_softlimit = slpc->min_freq;
slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
} else {
return intel_guc_slpc_set_min_freq(slpc,
@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
return ret;
}
/* Set cached value of ignore efficient freq */
intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
return 0;
}

View File

@ -310,9 +310,9 @@ void intel_huc_init_early(struct intel_huc *huc)
huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
} else {
huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
}
}

View File

@ -107,15 +107,6 @@ out_unpin:
return err;
}
static bool css_valid(const void *data, size_t size)
{
const struct uc_css_header *css = data;
@ -227,8 +218,8 @@ int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, s
for (i = 0; i < header->num_of_entries; i++, entry++) {
if (strcmp(entry->name, "HUCP.man") == 0)
intel_uc_fw_version_from_gsc_manifest(&huc_fw->file_selected.ver,
data + entry_offset(entry));
if (strcmp(entry->name, "huc_fw") == 0) {
u32 offset = entry_offset(entry);

View File

@ -10,6 +10,7 @@
#include "gt/intel_gt_debugfs.h"
#include "intel_guc_debugfs.h"
#include "intel_gsc_uc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
#include "intel_uc_debugfs.h"
@ -58,6 +59,7 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
intel_gsc_uc_debugfs_register(&uc->gsc, root);
intel_guc_debugfs_register(&uc->guc, root);
intel_huc_debugfs_register(&uc->huc, root);
}

View File

@ -12,6 +12,8 @@
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@ -468,6 +470,17 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
}
}
void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
const void *data)
{
const struct intel_gsc_manifest_header *manifest = data;
ver->major = manifest->fw_version.major;
ver->minor = manifest->fw_version.minor;
ver->patch = manifest->fw_version.hotfix;
ver->build = manifest->fw_version.build;
}
/**
* intel_uc_fw_init_early - initialize the uC object and select the firmware
* @uc_fw: uC firmware
@ -668,13 +681,18 @@ static int check_gsc_manifest(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_HUC:
intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
break;
case INTEL_UC_FW_TYPE_GSC:
intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
break;
default:
MISSING_CASE(uc_fw->type);
return -EINVAL;
}
if (uc_fw->dma_start_offset) {
u32 delta = uc_fw->dma_start_offset;
@ -734,10 +752,6 @@ static int check_fw_header(struct intel_gt *gt,
{
int err = 0;
if (uc_fw->has_gsc_headers)
err = check_gsc_manifest(gt, fw, uc_fw);
else
@ -773,6 +787,80 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
return 0;
}
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
struct intel_uc_fw_file *huc_selected)
{
struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
bool new_huc, new_guc;
/* we can only do this check after having fetched both GuC and HuC */
GEM_BUG_ON(!huc_selected->path || !guc_selected->path);
/*
* Due to changes in the authentication flow for MTL, HuC 8.5.1 or newer
* requires GuC 70.7.0 or newer. Older HuC binaries will instead require
* GuC < 70.7.0.
*/
new_huc = huc_ver->major > 8 ||
(huc_ver->major == 8 && huc_ver->minor > 5) ||
(huc_ver->major == 8 && huc_ver->minor == 5 && huc_ver->patch >= 1);
new_guc = guc_ver->major > 70 ||
(guc_ver->major == 70 && guc_ver->minor >= 7);
if (new_huc != new_guc) {
UNEXPECTED(gt, "HuC %u.%u.%u is incompatible with GuC %u.%u.%u\n",
huc_ver->major, huc_ver->minor, huc_ver->patch,
guc_ver->major, guc_ver->minor, guc_ver->patch);
gt_info(gt, "MTL GuC 70.7.0+ and HuC 8.5.1+ don't work with older releases\n");
return -ENOEXEC;
}
return 0;
}
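Concretely, the two cutoffs pair binaries as follows (version combinations invented for illustration):
/*
 * HuC 8.5.1 + GuC 70.7.0 -> both "new" -> compatible
 * HuC 8.5.0 + GuC 70.6.4 -> both "old" -> compatible
 * HuC 8.5.1 + GuC 70.6.4 -> mismatch   -> -ENOEXEC
 * HuC 8.4.3 + GuC 70.7.0 -> mismatch   -> -ENOEXEC
 */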
int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct intel_uc_fw_file *wanted = &uc_fw->file_wanted;
struct intel_uc_fw_file *selected = &uc_fw->file_selected;
int ret;
/*
* MTL has some compatibility issues with early GuC/HuC binaries
* not working with newer ones. This is specific to MTL and we
* don't expect it to extend to other platforms.
*/
if (IS_METEORLAKE(gt->i915) && uc_fw->type == INTEL_UC_FW_TYPE_HUC) {
ret = check_mtl_huc_guc_compatibility(gt, selected);
if (ret)
return ret;
}
if (!wanted->ver.major || !selected->ver.major)
return 0;
/* Check the file's major version was as it claimed */
if (selected->ver.major != wanted->ver.major) {
UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
intel_uc_fw_type_repr(uc_fw->type), selected->path,
selected->ver.major, selected->ver.minor,
wanted->ver.major, wanted->ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw))
return -ENOEXEC;
} else if (old_ver) {
if (selected->ver.minor < wanted->ver.minor)
*old_ver = true;
else if ((selected->ver.minor == wanted->ver.minor) &&
(selected->ver.patch < wanted->ver.patch))
*old_ver = true;
}
return 0;
}
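For example (numbers invented), with a wanted version of 70.6.0:
/*
 * selected 70.6.1 -> ok
 * selected 70.5.9 -> ok, but *old_ver is set (minor below wanted)
 * selected 71.0.0 -> -ENOEXEC, unless the firmware path was user-overridden
 */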
/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
@ -840,25 +928,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
goto fail;
}
err = intel_uc_check_file_version(uc_fw, &old_ver);
if (err)
goto fail;
if (old_ver && uc_fw->file_selected.ver.major) {
/* Preserve the version that was really wanted */

View File

@ -70,6 +70,7 @@ struct intel_uc_fw_ver {
u32 major;
u32 minor;
u32 patch;
u32 build;
};
/*
@ -289,6 +290,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_get_upload_size(uc_fw);
}
void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
const void *data);
int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver);
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_type type,
bool needs_ggtt_mapping);

View File

@ -974,7 +974,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
context_base = (void *) ctx->lrc_reg_state -

View File

@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
}
} while (unlikely(is_barrier(active)));
fence = __i915_active_fence_set(active, fence);
if (!fence)
__i915_active_acquire(ref);
else
dma_fence_put(fence);
out:
i915_active_release(ref);
@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
return NULL;
}
prev = __i915_active_fence_set(active, fence);
if (!prev)
	__i915_active_acquire(ref);
return prev;
}
@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
*
* Records the new @fence as the last active fence along its timeline in
* this active tracker, moving the tracking callbacks from the previous
* fence onto this one. Gets and returns a reference to the previous fence
* (if not already completed), which the caller must put after making sure
* that it is executed before the new fence. To ensure that the order of
* fences within the timeline of the i915_active_fence is understood, it
* should be locked by the caller.
*/
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
struct dma_fence *prev;
unsigned long flags;
/*
* In case of fences embedded in i915_requests, their memory is
* SLAB_TYPESAFE_BY_RCU, then it can be reused right after release
* by new requests. Then, there is a risk of passing back a pointer
* to a new, completely unrelated fence that reuses the same memory
* while tracked under a different active tracker. Combined with i915
* perf open/close operations that build await dependencies between
* engine kernel context requests and user requests from different
* timelines, this can lead to dependency loops and infinite waits.
*
* As a countermeasure, we try to get a reference to the active->fence
* first, so if we succeed and pass it back to our user then it is not
* released and potentially reused by an unrelated request before the
* user has a chance to set up an await dependency on it.
*/
prev = i915_active_fence_get(active);
if (fence == prev)
return fence;
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
* Consider that we have two threads arriving (A and B), with
* C already resident as the active->fence.
*
* Both A and B have got a reference to C or NULL, depending on the
* timing of the interrupt handler. Let's assume that if A has got C
* then it has locked C first (before B).
*
* Note the strong ordering of the timeline also provides consistent
* nesting rules for the fence->lock; the inner lock is always the
* older lock.
*/
spin_lock_irqsave(fence->lock, flags);
if (prev)
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
/*
* A does the cmpxchg first, and so it sees C or NULL, as before, or
* something else, depending on the timing of other threads and/or
* interrupt handler. If not the same as before then A unlocks C if
* applicable and retries, starting from an attempt to get a new
* active->fence. Meanwhile, B follows the same path as A.
* Once A succeeds with cmpxchg, B fails again, retries, gets A from
* active->fence, locks it as soon as A completes, and possibly
* succeeds with cmpxchg.
*/
while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
if (prev) {
spin_unlock(prev->lock);
dma_fence_put(prev);
}
spin_unlock_irqrestore(fence->lock, flags);
prev = i915_active_fence_get(active);
GEM_BUG_ON(prev == fence);
spin_lock_irqsave(fence->lock, flags);
if (prev)
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
}
/*
* If prev is NULL then the previous fence must have been signaled
* and we know that we are first on the timeline. If it is still
* present then, having the lock on that fence already acquired, we
* serialise with the interrupt handler, in the process of removing it
* from any future interrupt callback. A will then wait on C before
* executing (if present).
*
* As B is second, it sees A as the previous fence and so waits for
* it to complete its transition and takes over the occupancy for
* itself -- remembering that it needs to wait on A before executing.
*/
if (prev) {
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
}
@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
int err = 0;
/* Must maintain timeline ordering wrt previous active requests */
fence = __i915_active_fence_set(active, &rq->fence);
if (fence) {
err = i915_request_await_dma_fence(rq, fence);
dma_fence_put(fence);

View File

@ -323,7 +323,6 @@ struct drm_i915_private {
/*
* i915->gt[0] == &i915->gt0
*/
#define I915_MAX_GT 2
struct intel_gt *gt[I915_MAX_GT];
struct kobject *sysfs_gt;

View File

@ -1175,9 +1175,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
drm_clflush_pages(&page, 1);
s = kmap_local_page(page);
ret = compress_page(compress, s, dst, false);
kunmap_local(s);
drm_clflush_pages(&page, 1);

View File

@ -1319,7 +1319,7 @@ __store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
u32 *cs, cmd;
cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
cs = intel_ring_begin(rq, 4);
@ -4431,6 +4431,7 @@ static const struct i915_range mtl_oam_b_counters[] = {
static const struct i915_range xehp_oa_b_counters[] = {
{ .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
{ .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
{}
};
static const struct i915_range gen7_oa_mux_regs[] = {

View File

@ -941,8 +941,30 @@
#define HECI_H_GS1(base) _MMIO((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)
/*
* The FWSTS register values are FW defined and can be different between
* HECI1 and HECI2
*/
#define HECI_FWSTS1 0xc40
#define HECI1_FWSTS1_CURRENT_STATE REG_GENMASK(3, 0)
#define HECI1_FWSTS1_CURRENT_STATE_RESET 0
#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5
#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9)
#define HECI_FWSTS2 0xc48
#define HECI_FWSTS3 0xc60
#define HECI_FWSTS4 0xc64
#define HECI_FWSTS5 0xc68
#define HECI1_FWSTS5_HUC_AUTH_DONE (1 << 19)
#define HECI_FWSTS6 0xc6c
/* the FWSTS regs are 1-based, so we use -base for index 0 to get an invalid reg */
#define HECI_FWSTS(base, x) _MMIO((base) + _PICK(x, -(base), \
HECI_FWSTS1, \
HECI_FWSTS2, \
HECI_FWSTS3, \
HECI_FWSTS4, \
HECI_FWSTS5, \
HECI_FWSTS6))
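As a worked expansion (assuming i915's usual _PICK, which indexes a compound-literal array, so index 0 selects the -(base) entry):
/*
 * HECI_FWSTS(base, 5) -> _MMIO((base) + HECI_FWSTS5), i.e. base + 0xc68
 * HECI_FWSTS(base, 0) -> _MMIO((base) + -(base)), i.e. _MMIO(0): invalid reg
 */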
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
#define GTT_CACHE_EN_ALL 0xF0007FFF

View File

@ -1353,7 +1353,7 @@ __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
mark_external(rq);
return i915_sw_fence_await_dma_fence(&rq->submit, fence,
i915_fence_context_timeout(rq->i915,
fence->context),
I915_FENCE_GFP);
}
@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
/*
* Users have to put a reference potentially got by
* __i915_active_fence_set() to the returned request
* when no longer needed
*/
return to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
}
@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
0);
}
/*
* Users have to put the reference to prev potentially got
* by __i915_active_fence_set() when no longer needed
*/
return prev;
}
@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = __i915_request_ensure_ordering(rq, timeline);
else
prev = __i915_request_ensure_parallel_ordering(rq, timeline);
if (prev)
i915_request_put(prev);
/*
* Make sure that no request gazumped us - if it was allocated after

View File

@ -277,7 +277,7 @@ TRACE_EVENT(i915_request_queue,
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@ -304,7 +304,7 @@ DECLARE_EVENT_CLASS(i915_request,
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@ -353,7 +353,7 @@ TRACE_EVENT(i915_request_in,
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@ -382,7 +382,7 @@ TRACE_EVENT(i915_request_out,
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@ -623,7 +623,7 @@ TRACE_EVENT(i915_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;

View File

@ -34,6 +34,7 @@
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
#include "i915_drv.h"
#include "i915_gem_evict.h"
@ -1339,6 +1340,12 @@ err_unpin:
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
struct intel_gt *gt;
int id;
if (!tlb)
return;
/*
* Before we release the pages that were bound by this vma, we
* must invalidate all the TLBs that may still have a reference
@ -1347,7 +1354,9 @@ void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
* the most recent TLB invalidation seqno, and if we have not yet
* flushed the TLBs upon release, perform a full invalidation.
*/
for_each_gt(gt, vm->i915, id)
WRITE_ONCE(tlb[id],
intel_gt_next_invalidate_tlb_full(vm->gt));
}
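Note that tlb is now treated as a per-GT array of invalidation seqnos rather than a single value; the backing field (declared outside this excerpt) is presumably sized by I915_MAX_GT, along the lines of:
/* Sketch of the implied storage; the real declaration is not in this diff: */
u32 tlb[I915_MAX_GT];	/* one TLB-invalidation seqno per GT */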
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
@ -2014,7 +2023,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
if (async)
unbind_fence = i915_vma_resource_unbind(vma_res,
vma->obj->mm.tlb);
else
unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
@ -2031,7 +2040,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
dma_fence_put(unbind_fence);
unbind_fence = NULL;
}
vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
}
/*

View File

@ -162,8 +162,8 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9
* for HuC authentication. For now, its limited to DG2.
*/
if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
return to_gt(i915);
return NULL;
}
@ -188,8 +188,8 @@ static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_p
* Else we rely on mei-pxp module but only on legacy platforms
* prior to having separate media GTs and has a valid VDBOX.
*/
if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
return to_gt(i915);
return NULL;
}

View File

@ -197,7 +197,7 @@ bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
* are out of order) will suffice.
*/
if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) &&
intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc, true))
return true;
return false;

View File

@ -168,7 +168,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
len = 5;
if (GRAPHICS_VER(rq->i915) >= 8)
len++;
*cs++ = GFX_OP_PIPE_CONTROL(len);

View File

@ -24,6 +24,8 @@
#include <linux/random.h>
#include "gt/intel_gt_pm.h"
#include "gt/uc/intel_gsc_fw.h"
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_selftest.h"
@ -127,6 +129,31 @@ static void set_default_test_all(struct selftest *st, unsigned int count)
st[i].enabled = true;
}
static bool
__gsc_proxy_init_progressing(struct intel_gsc_uc *gsc)
{
return intel_gsc_uc_fw_proxy_get_status(gsc) == -EAGAIN;
}
static void
__wait_gsc_proxy_completed(struct drm_i915_private *i915)
{
bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY) &&
i915->media_gt &&
HAS_ENGINE(i915->media_gt, GSC0) &&
intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw));
/*
* The gsc proxy component depends on the kernel component driver load ordering
* and in corner cases (the first time after an IFWI flash), init-completion
* firmware flows take longer.
*/
unsigned long timeout_ms = 8000;
if (need_to_wait && wait_for(!__gsc_proxy_init_progressing(&i915->media_gt->uc.gsc),
timeout_ms))
pr_warn(DRIVER_NAME "Timed out waiting for gsc_proxy_completion!\n");
}
static int __run_selftests(const char *name,
struct selftest *st,
unsigned int count,
@ -206,6 +233,8 @@ int i915_live_selftests(struct pci_dev *pdev)
if (!i915_selftest.live)
return 0;
__wait_gsc_proxy_completed(pdev_to_i915(pdev));
err = run_selftests(live, pdev_to_i915(pdev));
if (err) {
i915_selftest.live = err;
@ -227,6 +256,8 @@ int i915_perf_selftests(struct pci_dev *pdev)
if (!i915_selftest.perf)
return 0;
__wait_gsc_proxy_completed(pdev_to_i915(pdev));
err = run_selftests(perf, pdev_to_i915(pdev));
if (err) {
i915_selftest.perf = err;

View File

@ -159,15 +159,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
if (GRAPHICS_VER(rq->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
} else if (GRAPHICS_VER(rq->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
} else if (GRAPHICS_VER(rq->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
@ -179,11 +179,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = arbitration_command;
if (GRAPHICS_VER(rq->i915) >= 8)
*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
else if (IS_HASWELL(rq->i915))
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
else if (GRAPHICS_VER(rq->i915) >= 6)
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
@ -201,7 +201,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
}
flags = 0;
if (GRAPHICS_VER(rq->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);

View File

@ -114,7 +114,7 @@ static struct dev_pm_domain pm_domain = {
static void mock_gt_probe(struct drm_i915_private *i915)
{
i915->gt[0] = to_gt(i915);
i915->gt[0]->name = "Mock GT";
}