drm/xe: Use proper vram offset
In xe_migrate functions, use the proper VRAM IO offset of the tiles
while calculating addresses.

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit c33a721943
parent 370997d168
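Before the diff, it may help to spell out the arithmetic this commit fixes. On a multi-tile device, each tile's VRAM occupies its own window inside the device's IO space, and the per-tile delta gt->mem.vram.io_start - xe->mem.vram.io_start must be folded into any resource-relative address before it is handed to hardware. Below is a minimal stand-alone sketch of that calculation; the struct layout and the example base addresses are invented for illustration and are not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's xe->mem.vram / gt->mem.vram. */
struct vram_region {
	uint64_t io_start;	/* start of this region's IO window */
};

/*
 * Offset of a tile's VRAM window relative to the device-wide VRAM base,
 * mirroring gt->mem.vram.io_start - xe->mem.vram.io_start in the patch.
 */
static uint64_t vram_region_io_offset(const struct vram_region *tile,
				      const struct vram_region *device)
{
	return tile->io_start - device->io_start;
}

int main(void)
{
	struct vram_region device = { .io_start = 0x4000000000ULL };
	struct vram_region tile1  = { .io_start = 0x4100000000ULL };
	uint64_t cur_start = 0x10000;	/* address relative to tile1's region */

	/* Without the offset, this address would land in tile 0's window. */
	uint64_t dev_addr = cur_start + vram_region_io_offset(&tile1, &device);

	printf("tile1 offset 0x%llx, device address 0x%llx\n",
	       (unsigned long long)vram_region_io_offset(&tile1, &device),
	       (unsigned long long)dev_addr);
	return 0;
}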
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1206,12 +1206,12 @@ struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_gt *gt,
  * XXX: This is in the VM bind data path, likely should calculate this once and
  * store, with a recalculation if the BO is moved.
  */
-static uint64_t vram_region_io_offset(struct xe_bo *bo)
+uint64_t vram_region_io_offset(struct ttm_resource *res)
 {
-	struct xe_device *xe = xe_bo_device(bo);
-	struct xe_gt *gt = mem_type_to_gt(xe, bo->ttm.resource->mem_type);
+	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
+	struct xe_gt *gt = mem_type_to_gt(xe, res->mem_type);
 
-	if (bo->ttm.resource->mem_type == XE_PL_STOLEN)
+	if (res->mem_type == XE_PL_STOLEN)
 		return xe_ttm_stolen_gpu_offset(xe);
 
 	return gt->mem.vram.io_start - xe->mem.vram.io_start;
@@ -1298,7 +1298,7 @@ int xe_bo_pin(struct xe_bo *bo)
 		XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
 
 		place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
-			       vram_region_io_offset(bo)) >> PAGE_SHIFT;
+			       vram_region_io_offset(bo->ttm.resource)) >> PAGE_SHIFT;
 		place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
 
 		spin_lock(&xe->pinned.lock);
@@ -1442,7 +1442,7 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
 
 		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
 			     page_size, &cur);
-		return cur.start + offset + vram_region_io_offset(bo);
+		return cur.start + offset + vram_region_io_offset(bo->ttm.resource);
 	}
 }
 
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -224,6 +224,7 @@ void xe_bo_vunmap(struct xe_bo *bo);
 bool mem_type_is_vram(u32 mem_type);
 bool xe_bo_is_vram(struct xe_bo *bo);
 bool xe_bo_is_stolen(struct xe_bo *bo);
+uint64_t vram_region_io_offset(struct ttm_resource *res);
 
 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
 
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -392,6 +392,7 @@ static u64 xe_migrate_res_sizes(struct xe_res_cursor *cur)
 
 static u32 pte_update_size(struct xe_migrate *m,
 			   bool is_vram,
+			   struct ttm_resource *res,
 			   struct xe_res_cursor *cur,
 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
@@ -417,7 +418,8 @@ static u32 pte_update_size(struct xe_migrate *m,
 		cmds += cmd_size;
 	} else {
 		/* Offset into identity map. */
-		*L0_ofs = xe_migrate_vram_ofs(cur->start);
+		*L0_ofs = xe_migrate_vram_ofs(cur->start +
+					      vram_region_io_offset(res));
 		cmds += cmd_size;
 	}
 
@@ -467,6 +469,7 @@ static void emit_pte(struct xe_migrate *m,
 				addr |= GEN12_PTE_PS64;
 			}
 
+			addr += vram_region_io_offset(bo->ttm.resource);
 			addr |= GEN12_PPGTT_PTE_LM;
 		}
 		addr |= PPAT_CACHED | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
@@ -646,17 +649,17 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 
 	src_L0 = min(src_L0, dst_L0);
 
-	batch_size += pte_update_size(m, src_is_vram, &src_it, &src_L0,
+	batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
 				      &src_L0_ofs, &src_L0_pt, 0, 0,
 				      NUM_PT_PER_BLIT);
 
-	batch_size += pte_update_size(m, dst_is_vram, &dst_it, &src_L0,
+	batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
 				      &dst_L0_ofs, &dst_L0_pt, 0,
 				      NUM_PT_PER_BLIT, NUM_PT_PER_BLIT);
 
 	if (copy_system_ccs) {
 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
-		batch_size += pte_update_size(m, false, &ccs_it, &ccs_size,
+		batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
 					      &ccs_ofs, &ccs_pt, 0,
 					      2 * NUM_PT_PER_BLIT,
 					      NUM_PT_PER_BLIT);
@@ -879,7 +882,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 
 	/* Calculate final sizes and batch size.. */
 	batch_size = 2 +
-		pte_update_size(m, clear_vram, &src_it,
+		pte_update_size(m, clear_vram, src, &src_it,
 				&clear_L0, &clear_L0_ofs, &clear_L0_pt,
 				emit_clear_cmd_len(xe), 0,
 				NUM_PT_PER_BLIT);
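The xe_migrate.c half of the change exists because the migration engine addresses VRAM through a flat identity map spanning every tile, while a ttm_resource cursor's cur->start is relative to its own tile's region. The following is a hypothetical stand-alone model of that interaction; IDENTITY_MAP_BASE and the tile offset are invented values, and the driver's xe_migrate_vram_ofs() does more than this:

#include <assert.h>
#include <stdint.h>

#define IDENTITY_MAP_BASE (256ULL << 30)	/* VM offset of the 1:1 VRAM map */

/* Device address -> migrate-VM address, loosely modeling xe_migrate_vram_ofs(). */
static uint64_t migrate_vram_ofs(uint64_t dev_addr)
{
	return IDENTITY_MAP_BASE + dev_addr;
}

int main(void)
{
	uint64_t tile1_io_offset = 4ULL << 30;	/* tile 1 window, e.g. +4 GiB */
	uint64_t cur_start = 0x10000;		/* resource-relative address  */

	/* Before the fix: a tile 1 address aliased tile 0's window. */
	uint64_t wrong = migrate_vram_ofs(cur_start);
	/* After the fix: the tile's IO offset is folded in first. */
	uint64_t right = migrate_vram_ofs(cur_start + tile1_io_offset);

	assert(right - wrong == tile1_io_offset);
	return 0;
}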