drm/i915/guc: Prepare for error propagation

Currently guc_mmio_reg_add() relies on having enough memory available
in the array to add a new slot. It uses `GEM_BUG_ON(count >= regset->size);`
to protect against going above that threshold.

In order to allow guc_mmio_reg_add() to handle the memory allocation by
itself, it must return an error in case of failure. Adjust the return
type so this error can be propagated to the callers of
guc_mmio_reg_add() and guc_mmio_regset_init().

No intended change in behavior.

Cc: Matt Roper <matthew.d.roper@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220208070141.2095177-2-lucas.demarchi@intel.com
parent 876f7a438e
commit f4044ca196
@@ -224,8 +224,8 @@ static int guc_mmio_reg_cmp(const void *a, const void *b)
 	return (int)ra->offset - (int)rb->offset;
 }
 
-static void guc_mmio_reg_add(struct temp_regset *regset,
-			     u32 offset, u32 flags)
+static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
+					  u32 offset, u32 flags)
 {
 	u32 count = regset->used;
 	struct guc_mmio_reg reg = {
@@ -244,7 +244,7 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 	 */
 	if (bsearch(&reg, regset->registers, count,
 		    sizeof(reg), guc_mmio_reg_cmp))
-		return;
+		return 0;
 
 	slot = &regset->registers[count];
 	regset->used++;
@@ -257,6 +257,8 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 
 		swap(slot[1], slot[0]);
 	}
+
+	return 0;
 }
 
 #define GUC_MMIO_REG_ADD(regset, reg, masked) \
@@ -264,32 +266,35 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 		     i915_mmio_reg_offset((reg)), \
 		     (masked) ? GUC_REGSET_MASKED : 0)
 
-static void guc_mmio_regset_init(struct temp_regset *regset,
-				 struct intel_engine_cs *engine)
+static int guc_mmio_regset_init(struct temp_regset *regset,
+				struct intel_engine_cs *engine)
 {
 	const u32 base = engine->mmio_base;
 	struct i915_wa_list *wal = &engine->wa_list;
 	struct i915_wa *wa;
 	unsigned int i;
+	int ret = 0;
 
 	regset->used = 0;
 
-	GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
-	GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
-	GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
 
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
-		GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+		ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
 
 	/* Be extra paranoid and include all whitelist registers. */
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
-		GUC_MMIO_REG_ADD(regset,
-				 RING_FORCE_TO_NONPRIV(base, i),
-				 false);
+		ret |= GUC_MMIO_REG_ADD(regset,
+					RING_FORCE_TO_NONPRIV(base, i),
+					false);
 
 	/* add in local MOCS registers */
 	for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
-		GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+		ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+
+	return ret ? -1 : 0;
 }
 
 static int guc_mmio_reg_state_query(struct intel_guc *guc)
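The caller-side handling is not part of this patch. As a rough illustration of the convention the commit message describes, here is a minimal sketch of how a caller could now propagate the error from guc_mmio_regset_init(); the helper name guc_mmio_regset_build_all() and its placement are assumptions for illustration, not code from this commit.

/*
 * Illustrative sketch only -- not part of this patch. It assumes the
 * i915/GuC types used in the diff above (struct intel_guc, struct
 * temp_regset, for_each_engine(), guc_to_gt()) and shows how a caller
 * could propagate the new error return instead of relying on
 * GEM_BUG_ON(): any failure from guc_mmio_regset_init() is bubbled up.
 */
static int guc_mmio_regset_build_all(struct intel_guc *guc,
				     struct temp_regset *temp_set)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = guc_mmio_regset_init(temp_set, engine);
		if (ret < 0)
			return ret;	/* propagate instead of crashing */
	}

	return 0;
}

Note also that because guc_mmio_reg_add() is now declared long __must_check, the compiler will warn about any caller that silently ignores its result, which is what forces the `ret |=` accumulation in guc_mmio_regset_init().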