drm/i915/mtl: workaround coherency issue for Media
This patch implements Wa_22016122933. In MTL, memory writes initiated by the Media tile update the whole cache line, even for partial writes. This creates a coherency problem for cacheable memory if both the CPU and the GPU are writing data to different locations within a single cache line. This patch circumvents the issue by making CPU/GPU shared memory uncacheable (WC on the CPU side, and PAT index 2 for the GPU). Additionally, it ensures that CPU writes are visible to the GPU with an intel_guc_write_barrier().

While fixing the CTB issue, we noticed some random GSC firmware loading failures because the shared buffers are cacheable (WB) on the CPU side but uncached on the GPU side. To fix these issues we need to map such shared buffers as WC on the CPU side. Since such allocations are not all done through the GuC allocator, and to avoid too many code changes, i915_coherent_map_type() is now hard-coded to return WC for MTL.

v2: Simplify the commit message (Matt).

BSpec: 45101
Signed-off-by: Fei Yang <fei.yang@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Acked-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230424182902.3663500-3-fei.yang@intel.com
Commit: a161b6dba6 (parent: 341ad0e8e2)
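For orientation before the diff, here is a minimal sketch (not part of the patch) of the access pattern the commit message describes, built from the same i915 helpers the hunks below touch: the object is marked I915_CACHE_NONE so the GPU side maps it uncached (PAT index 2), the CPU side is pinned WC via i915_coherent_map_type(), and intel_guc_write_barrier() flushes the CPU's write-combining buffers before the GPU reads the data. The helper name mtl_fill_shared_buf() is hypothetical, and the snippet would only compile inside the i915 driver; the real call sites are gsc_fw_load_prepare(), intel_guc_allocate_vma() and ct_read() in the hunks below.

/*
 * Illustrative sketch only (hypothetical helper, i915-internal context
 * assumed): fill a CPU/GPU shared buffer on MTL under Wa_22016122933.
 */
static int mtl_fill_shared_buf(struct intel_gt *gt,
			       struct drm_i915_gem_object *obj,
			       const void *src, size_t len)
{
	struct drm_i915_private *i915 = gt->i915;
	void *dst;

	/* Make the GPU side uncached so Media writes cannot dirty the line */
	if (IS_METEORLAKE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	/* i915_coherent_map_type() now returns I915_MAP_WC on MTL */
	dst = i915_gem_object_pin_map_unlocked(obj,
					       i915_coherent_map_type(i915, obj, true));
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	memcpy(dst, src, len);

	/* Flush the CPU write-combining buffers before the GPU reads dst */
	intel_guc_write_barrier(&gt->uc.guc);

	i915_gem_object_unpin_map(obj);
	return 0;
}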
@@ -469,7 +469,10 @@ enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
 				  struct drm_i915_gem_object *obj,
 				  bool always_coherent)
 {
-	if (i915_gem_object_is_lmem(obj))
+	/*
+	 * Wa_22016122933: always return I915_MAP_WC for MTL
+	 */
+	if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
 		return I915_MAP_WC;
 	if (HAS_LLC(i915) || always_coherent)
 		return I915_MAP_WB;
@@ -110,6 +110,13 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
 	if (obj->base.size < gsc->fw.size)
 		return -ENOSPC;
 
+	/*
+	 * Wa_22016122933: For MTL the shared memory needs to be mapped
+	 * as WC on CPU side and UC (PAT index 2) on GPU side
+	 */
+	if (IS_METEORLAKE(i915))
+		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
 	dst = i915_gem_object_pin_map_unlocked(obj,
 					       i915_coherent_map_type(i915, obj, true));
 	if (IS_ERR(dst))
@@ -125,6 +132,12 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
 	memset(dst, 0, obj->base.size);
 	memcpy(dst, src, gsc->fw.size);
 
+	/*
+	 * Wa_22016122933: Making sure the data in dst is
+	 * visible to GSC right away
+	 */
+	intel_guc_write_barrier(&gt->uc.guc);
+
 	i915_gem_object_unpin_map(gsc->fw.obj);
 	i915_gem_object_unpin_map(obj);
 
@@ -743,6 +743,13 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
+	/*
+	 * Wa_22016122933: For MTL the shared memory needs to be mapped
+	 * as WC on CPU side and UC (PAT index 2) on GPU side
+	 */
+	if (IS_METEORLAKE(gt->i915))
+		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
 	if (IS_ERR(vma))
 		goto err;
@@ -902,6 +902,12 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
 	/* now update descriptor */
 	WRITE_ONCE(desc->head, head);
 
+	/*
+	 * Wa_22016122933: Making sure the head update is
+	 * visible to GuC right away
+	 */
+	intel_guc_write_barrier(ct_to_guc(ct));
+
 	return available - len;
 
 corrupted: