linux/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
Chris Wilson 89351925a4 drm/i915/gt: Switch to object allocations for page directories
The GEM object is grossly overweight for the practicality of tracking
large numbers of individual pages, yet it is currently our only
abstraction for tracking DMA allocations. Since those allocations need
to be reserved upfront before an operation, and since we need to break
away from simple system memory, we need to ditch the plain struct page
wrappers.

In the process, we drop the WC mapping as we ended up clflushing
everything anyway due to various issues across a wider range of
platforms. In a future step, though, we need to drop the kmap_atomic
approach, which suggests we need to pre-map all the pages and keep them
mapped.

v2: Verify our large scratch page is suitably DMA aligned; and manually
clear the scratch since we are allocating plain struct pages full of
prior content.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200729164219.5737-2-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2020-09-07 14:24:08 +03:00

/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2020 Intel Corporation
*/

#ifndef __GEN6_PPGTT_H__
#define __GEN6_PPGTT_H__

#include "intel_gtt.h"

struct gen6_ppgtt {
	struct i915_ppgtt base;
	struct mutex flush;
	struct i915_vma *vma;
	gen6_pte_t __iomem *pd_addr;
	u32 pp_dir;
	atomic_t pin_count;
	struct mutex pin_mutex;
	bool scan_for_unused_pt;
};

static inline u32 gen6_pte_index(u32 addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pte_count(u32 addr, u32 length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pde_index(u32 addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

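/*
 * The BUILD_BUG_ON() below ensures that 'base' is the first member of
 * struct gen6_ppgtt, so the container_of() in __to_gen6_ppgtt() reduces
 * to a plain pointer cast.
 */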
#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)

static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
{
	BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
	return __to_gen6_ppgtt(base);
}

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen6_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
	     (pt = i915_pt_entry(pd, iter), true); \
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

#define gen6_for_all_pdes(pt, pd, iter) \
	for (iter = 0; \
	     iter < I915_PDES && \
	     (pt = i915_pt_entry(pd, iter), true); \
	     ++iter)

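/*
 * Illustrative usage of gen6_for_each_pde() (a sketch only, not part of the
 * original header): walk the page tables backing a GTT range. 'pd', 'start'
 * and 'length' are assumed to be supplied by the caller.
 *
 *	struct i915_page_table *pt;
 *	u32 pde;
 *
 *	gen6_for_each_pde(pt, pd, start, length, pde) {
 *		const unsigned int count = gen6_pte_count(start, length);
 *
 *		// operate on 'count' PTEs within 'pt' for this chunk;
 *		// the macro advances 'start'/'length' to the next PDE
 *	}
 */
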
int gen6_ppgtt_pin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
void gen6_ppgtt_enable(struct intel_gt *gt);
void gen7_ppgtt_enable(struct intel_gt *gt);
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);

#endif