drm/xe/migrate: Do not hand-encode pte
Instead of encoding the pte, call a new vfunc from xe_vm to handle that.
The encoding may not be the same on every platform, so keeping it in one
place makes them easier to support.

Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://lore.kernel.org/r/20230927193902.2849159-5-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit 23c8495efe
parent 0e5e77bd97
committed by Rodrigo Vivi
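The change replaces open-coded PTE bit manipulation with a call through a per-VM function-pointer table, so each platform can supply its own encoder. Below is a minimal standalone sketch of that pattern, not the driver's actual code: the type names, bit values, and cache levels are simplified stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in bits; values do not match the real driver. */
#define PAGE_PRESENT	(1ull << 0)
#define PAGE_RW		(1ull << 1)
#define PTE_DM		(1ull << 11)	/* device (VRAM) memory */
#define PTE_PS64	(1ull << 6)	/* 64K page size hint */

enum cache_level { CACHE_NONE, CACHE_WB };

/* Platform-specific PTE encoding lives behind one ops table per VM. */
struct pt_ops {
	uint64_t (*pte_encode_addr)(uint64_t addr, enum cache_level cache,
				    uint32_t pt_level, bool devmem,
				    uint64_t flags);
};

struct vm {
	const struct pt_ops *pt_ops;
};

/* One platform's encoder; another platform registers a different table. */
static uint64_t platform_a_pte_encode_addr(uint64_t addr,
					   enum cache_level cache,
					   uint32_t pt_level, bool devmem,
					   uint64_t flags)
{
	uint64_t pte = addr | PAGE_PRESENT | PAGE_RW;

	if (cache == CACHE_WB)
		pte |= 1ull << 3;	/* illustrative cache bits */
	if (pt_level == 1)
		pte |= 1ull << 7;	/* illustrative 2M page-size bit */
	if (devmem)
		pte |= PTE_DM;
	return pte | flags;
}

static const struct pt_ops platform_a_pt_ops = {
	.pte_encode_addr = platform_a_pte_encode_addr,
};

int main(void)
{
	struct vm vm = { .pt_ops = &platform_a_pt_ops };

	/* Callers no longer OR PTE bits together by hand. */
	uint64_t pte = vm.pt_ops->pte_encode_addr(0x100000, CACHE_WB, 0,
						  true, PTE_PS64);
	printf("pte = %#llx\n", (unsigned long long)pte);
	return 0;
}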
@@ -261,8 +261,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 		level = 2;
 		ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
-		flags = XE_PAGE_RW | XE_PAGE_PRESENT | PPAT_CACHED |
-			XE_PPGTT_PTE_DM | XE_PDPE_PS_1G;
+		flags = vm->pt_ops->pte_encode_addr(0, XE_CACHE_WB, level, true, 0);

 		/*
 		 * Use 1GB pages, it shouldn't matter the physical amount of
@@ -483,7 +482,8 @@ static void emit_pte(struct xe_migrate *m,
 		ptes -= chunk;

 		while (chunk--) {
-			u64 addr;
+			u64 addr, flags = 0;
+			bool devmem = false;

 			addr = xe_res_dma(cur) & PAGE_MASK;
 			if (is_vram) {
@@ -491,13 +491,15 @@ static void emit_pte(struct xe_migrate *m,
 				if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
 				    !(cur_ofs & (16 * 8 - 1))) {
 					xe_tile_assert(m->tile, IS_ALIGNED(addr, SZ_64K));
-					addr |= XE_PTE_PS64;
+					flags |= XE_PTE_PS64;
 				}

 				addr += vram_region_gpu_offset(bo->ttm.resource);
-				addr |= XE_PPGTT_PTE_DM;
+				devmem = true;
 			}
-			addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW;
+
+			addr = m->q->vm->pt_ops->pte_encode_addr(addr, XE_CACHE_WB,
+								 0, devmem, flags);
 			bb->cs[bb->len++] = lower_32_bits(addr);
 			bb->cs[bb->len++] = upper_32_bits(addr);

@@ -39,6 +39,8 @@ struct xe_pt_ops {
 			     enum xe_cache_level cache, u32 pt_level);
 	u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
 			      enum xe_cache_level cache, u32 pt_level);
+	u64 (*pte_encode_addr)(u64 addr, enum xe_cache_level cache,
+			       u32 pt_level, bool devmem, u64 flags);
 	u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
 			     const enum xe_cache_level cache);
 };

@@ -1235,7 +1235,6 @@ static u64 pte_encode_cache(enum xe_cache_level cache)

 static u64 pte_encode_ps(u32 pt_level)
 {
-	/* XXX: Does hw support 1 GiB pages? */
 	XE_WARN_ON(pt_level > 2);

 	if (pt_level == 1)
@@ -1291,9 +1290,31 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
 	return pte;
 }

+static u64 xelp_pte_encode_addr(u64 addr, enum xe_cache_level cache,
+				u32 pt_level, bool devmem, u64 flags)
+{
+	u64 pte;
+
+	/* Avoid passing random bits directly as flags */
+	XE_WARN_ON(flags & ~XE_PTE_PS64);
+
+	pte = addr;
+	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
+	pte |= pte_encode_cache(cache);
+	pte |= pte_encode_ps(pt_level);
+
+	if (devmem)
+		pte |= XE_PPGTT_PTE_DM;
+
+	pte |= flags;
+
+	return pte;
+}
+
 static const struct xe_pt_ops xelp_pt_ops = {
 	.pte_encode_bo = xelp_pte_encode_bo,
 	.pte_encode_vma = xelp_pte_encode_vma,
+	.pte_encode_addr = xelp_pte_encode_addr,
 	.pde_encode_bo = xelp_pde_encode_bo,
 };
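The payoff of routing everything through pt_ops is that a later platform with a different PTE layout only needs to register its own table; no caller changes. A hypothetical sketch of what that could look like follows; the xehpc_* names are invented for illustration and are not part of this commit.

/*
 * Hypothetical: a platform with a different PTE layout supplies its
 * own encoder; nothing in the migrate code has to change.
 * xehpc_pte_encode_addr and xehpc_pt_ops are invented names.
 */
static u64 xehpc_pte_encode_addr(u64 addr, enum xe_cache_level cache,
				 u32 pt_level, bool devmem, u64 flags)
{
	/* ... a different bit layout would be assembled here ... */
	return addr;
}

static const struct xe_pt_ops xehpc_pt_ops = {
	.pte_encode_bo = xelp_pte_encode_bo,
	.pte_encode_vma = xelp_pte_encode_vma,
	.pte_encode_addr = xehpc_pte_encode_addr,
	.pde_encode_bo = xelp_pde_encode_bo,
};

VM creation would then pick the table matching the running platform instead of always pointing vm->pt_ops at &xelp_pt_ops.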