Currently the KMD uses enum i915_cache_level to set the caching policy for buffer objects. This is fragile because the PAT index, which actually controls the caching behavior in the PTE, has far more levels than what is defined in the enum. In addition, the PAT index is platform dependent, so translating between i915_cache_level and the PAT index is not reliable and makes the code more complicated.

From the UMD's perspective there is also a need to set the caching policy for performance fine tuning. It is much easier for the UMD to use the PAT index directly, because the behavior of each PAT index is clearly defined in the Bspec. Having the abstracted i915_cache_level sitting in between would only cause more ambiguity. PAT is expected to work much like MOCS already works today, and by design userspace is expected to select the index that exactly matches the desired behavior described in the hardware specification.

For these reasons this patch replaces i915_cache_level with the PAT index. Note that cache_level is not removed completely: the KMD still needs to create buffer objects with simple cache settings such as cached, uncached, or writethrough, so for kernel objects cache_level is kept for simplicity and backward compatibility. On pre-gen12 platforms the PAT index has a 1:1 mapping to i915_cache_level, so the two are interchangeable; see the use of LEGACY_CACHELEVEL.

One consequence of this change is that gen8_pte_encode no longer works for gen12 platforms, because gen12 platforms have different PAT definitions. At the same time, the mtl_pte_encode function introduced specifically for MTL becomes generic for all gen12 platforms. This patch therefore renames the MTL PTE encode function to gen12_pte_encode and applies it to all gen12 platforms. Although this change looks unrelated, separating it out would temporarily break gen12 PTE encoding, so the two are squashed into one patch.

Special note: this patch changes the way caching behavior is controlled, in the sense that some objects are left to be managed by userspace. For such objects we need to be careful not to change the userspace settings. Kerneldoc and comments are added around obj->cache_coherent, cache_dirty, and how to bypass the checks via i915_gem_object_has_cache_level. For full understanding, these changes need to be looked at together with the two follow-up patches: one disables the {set|get}_caching ioctls and the other adds the set_pat extension to the GEM_CREATE uAPI.

Bspec: 63019
Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Fei Yang <fei.yang@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230509165200.1740-3-fei.yang@intel.com
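To illustrate the gen12 encoding described above, here is a minimal sketch of a PTE encode helper keyed on the PAT index rather than i915_cache_level. It is not the verbatim upstream implementation; the PAT bit-field macro names (GEN12_PPGTT_PTE_PAT0..2, MTL_PPGTT_PTE_PAT3, GEN12_PPGTT_PTE_LM) and their exact bit positions are assumptions made for the sketch.

/*
 * Sketch only: build a gen12 PPGTT PTE from a DMA address, a PAT index
 * and bind flags. The PAT index is scattered across individual PAT bits
 * in the PTE; the bit-field macros used here are illustrative.
 */
static u64 gen12_pte_encode(dma_addr_t addr,
			    unsigned int pat_index,
			    u32 flags)
{
	u64 pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~GEN8_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;	/* object lives in local memory */

	/* Spread the PAT index across the PAT bits of the PTE. */
	if (pat_index & BIT(0))
		pte |= GEN12_PPGTT_PTE_PAT0;
	if (pat_index & BIT(1))
		pte |= GEN12_PPGTT_PTE_PAT1;
	if (pat_index & BIT(2))
		pte |= GEN12_PPGTT_PTE_PAT2;
	if (pat_index & BIT(3))
		pte |= MTL_PPGTT_PTE_PAT3;

	return pte;
}

With such a helper, callers like ppgtt_bind_vma below simply pass the PAT index through to vm->insert_entries() instead of an enum i915_cache_level, and the behavior of each index is whatever the hardware specification defines for it.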
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	pt->base = vm->alloc_pt_dma(vm, sz);
	if (IS_ERR(pt->base)) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->is_compact = false;
	atomic_set(&pt->used, 0);
	return pt;
}

struct i915_page_directory *__alloc_pd(int count)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd->entry)) {
		kfree(pd);
		return NULL;
	}

	spin_lock_init(&pd->lock);
	return pd;
}

struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		kfree(pd->entry);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
	BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

	if (lvl) {
		struct i915_page_directory *pd =
			container_of(pt, typeof(*pd), pt);
		kfree(pd->entry);
	}

	if (pt->base)
		i915_gem_object_put(pt->base);

	kfree(pt);
}

static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = __px_vaddr(pdma);

	vaddr[idx] = encoded_entry;
	drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
}

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}

int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (GRAPHICS_VER(i915) == 6)
		gen6_ppgtt_enable(gt);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ppgtt_enable(gt);

	return 0;
}

static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
	if (GRAPHICS_VER(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    unsigned int pat_index,
		    u32 flags)
{
	u32 pte_flags;

	if (!vma_res->allocated) {
		vm->allocate_va_range(vm, stash, vma_res->start,
				      vma_res->vma_size);
		vma_res->allocated = true;
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, pat_index, pte_flags);
	wmb();
}

void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res)
{
	if (!vma_res->allocated)
		return;

	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
	if (vma_res->tlb)
		vma_invalidate_tlb(vm, vma_res->tlb);
}

static unsigned long pd_count(u64 size, int shift)
{
	/* Beware later misalignment */
	return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size)
{
	unsigned long count;
	int shift, n, pt_sz;

	shift = vm->pd_shift;
	if (!shift)
		return 0;

	pt_sz = stash->pt_sz;
	if (!pt_sz)
		pt_sz = I915_GTT_PAGE_SIZE_4K;
	else
		GEM_BUG_ON(!IS_DGFX(vm->i915));

	GEM_BUG_ON(!is_power_of_2(pt_sz));

	count = pd_count(size, shift);
	while (count--) {
		struct i915_page_table *pt;

		pt = alloc_pt(vm, pt_sz);
		if (IS_ERR(pt)) {
			i915_vm_free_pt_stash(vm, stash);
			return PTR_ERR(pt);
		}

		pt->stash = stash->pt[0];
		stash->pt[0] = pt;
	}

	for (n = 1; n < vm->top; n++) {
		shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
		count = pd_count(size, shift);
		while (count--) {
			struct i915_page_directory *pd;

			pd = alloc_pd(vm);
			if (IS_ERR(pd)) {
				i915_vm_free_pt_stash(vm, stash);
				return PTR_ERR(pd);
			}

			pd->pt.stash = stash->pt[1];
			stash->pt[1] = &pd->pt;
		}
	}

	return 0;
}

int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n, err;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		for (pt = stash->pt[n]; pt; pt = pt->stash) {
			err = map_pt_dma_locked(vm, pt->base);
			if (err)
				return err;
		}
	}

	return 0;
}

void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		while ((pt = stash->pt[n])) {
			stash->pt[n] = pt->stash;
			free_px(vm, pt, n);
		}
	}
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = i915->drm.dev;
	ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
	ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;

	dma_resv_init(&ppgtt->vm._resv);
	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
}