Merge tag 'drm-intel-gt-next-2023-12-15' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Driver Changes:

- Eliminate use of kmap_atomic() in i915 (Zhao)
- Add Wa_14019877138 for DG2 (Haridhar)
- Static checker and spelling fixes (Colin, Karthik, Randy)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZXxCibZZQqlqhDN3@jlahtine-mobl.ger.corp.intel.com
commit b76c01f1d9
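Most of the diff below is a mechanical conversion from kmap_atomic()/kunmap_atomic() to kmap_local_page()/kunmap_local(). A minimal sketch of the pattern, using a hypothetical helper (read_dword_from_page() is illustrative only, not part of this series):

#include <linux/types.h>
#include <linux/highmem.h>

/*
 * Hypothetical helper, for illustration only.
 *
 * kmap_atomic() implicitly disabled preemption and pagefaults; a
 * kmap_local_page() mapping is still CPU-local and migration-safe but
 * leaves preemption enabled, so callers that relied on the implicit
 * side effects (see the shmem_pwrite hunk below) add them back
 * explicitly.
 */
static u32 read_dword_from_page(struct page *page, unsigned int offset)
{
	u32 *vaddr;
	u32 val;

	vaddr = kmap_local_page(page);	/* was: kmap_atomic(page) */
	val = vaddr[offset / sizeof(u32)];
	kunmap_local(vaddr);		/* was: kunmap_atomic(vaddr) */

	return val;
}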
@@ -1159,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
 
 	vaddr = unmask_page(cache->vaddr);
 	if (cache->vaddr & KMAP)
-		kunmap_atomic(vaddr);
+		kunmap_local(vaddr);
 	else
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
 }
@@ -1175,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
 	if (cache->vaddr & KMAP) {
 		struct page *page = i915_gem_object_get_page(obj, cache->page);
 
-		vaddr = kmap_atomic(page);
+		vaddr = kmap_local_page(page);
 		cache->vaddr = unmask_flags(cache->vaddr) |
 			       (unsigned long)vaddr;
 	} else {
@@ -1205,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
 		if (cache->vaddr & CLFLUSH_AFTER)
 			mb();
 
-		kunmap_atomic(vaddr);
+		kunmap_local(vaddr);
 		i915_gem_object_finish_access(obj);
 	} else {
 		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1237,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	struct page *page;
 
 	if (cache->vaddr) {
-		kunmap_atomic(unmask_page(cache->vaddr));
+		kunmap_local(unmask_page(cache->vaddr));
 	} else {
 		unsigned int flushes;
 		int err;
@@ -1259,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	if (!obj->mm.dirty)
 		set_page_dirty(page);
 
-	vaddr = kmap_atomic(page);
+	vaddr = kmap_local_page(page);
 	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
 	cache->page = pageno;
 
@@ -500,17 +500,15 @@ static void
 i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
 {
 	pgoff_t idx = offset >> PAGE_SHIFT;
-	void *src_map;
 	void *src_ptr;
 
-	src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
-
-	src_ptr = src_map + offset_in_page(offset);
+	src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
+		  + offset_in_page(offset);
 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
 		drm_clflush_virt_range(src_ptr, size);
 	memcpy(dst, src_ptr, size);
 
-	kunmap_atomic(src_map);
+	kunmap_local(src_ptr);
 }
 
 static void
@@ -65,16 +65,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	dst = vaddr;
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
-		void *src;
 
 		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			goto err_st;
 
-		src = kmap_atomic(page);
-		memcpy(dst, src, PAGE_SIZE);
+		memcpy_from_page(dst, page, 0, PAGE_SIZE);
 		drm_clflush_virt_range(dst, PAGE_SIZE);
-		kunmap_atomic(src);
 
 		put_page(page);
 		dst += PAGE_SIZE;
@@ -113,16 +110,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
-		char *dst;
 
 		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			continue;
 
-		dst = kmap_atomic(page);
 		drm_clflush_virt_range(src, PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(dst);
+		memcpy_to_page(page, 0, src, PAGE_SIZE);
 
 		set_page_dirty(page);
 		if (obj->mm.madv == I915_MADV_WILLNEED)
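Where the old code open-coded a map/memcpy/unmap sequence just to copy a whole page, the hunks above switch to the memcpy_from_page()/memcpy_to_page() helpers. Roughly, the read-side helper amounts to the following (a simplified sketch of the linux/highmem.h helper; the in-tree versions also carry a bounds check and, on the write side, a dcache flush):

#include <linux/highmem.h>
#include <linux/string.h>

/* Simplified sketch of memcpy_from_page() for illustration. */
static inline void memcpy_from_page_sketch(char *to, struct page *page,
					   size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}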
@@ -485,11 +485,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 		if (err < 0)
 			return err;
 
-		vaddr = kmap_atomic(page);
+		vaddr = kmap_local_page(page);
+		pagefault_disable();
 		unwritten = __copy_from_user_inatomic(vaddr + pg,
 						      user_data,
 						      len);
-		kunmap_atomic(vaddr);
+		pagefault_enable();
+		kunmap_local(vaddr);
 
 		err = aops->write_end(obj->base.filp, mapping, offset, len,
 				      len - unwritten, page, data);
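The shmem_pwrite hunk is the one place where the conversion is not purely mechanical: kmap_atomic() disabled pagefaults implicitly, which __copy_from_user_inatomic() relies on, while kmap_local_page() does not, so the copy is now bracketed with pagefault_disable()/pagefault_enable(). A minimal sketch of the resulting pattern (copy_user_to_page() is a hypothetical name, not the driver function):

#include <linux/highmem.h>
#include <linux/uaccess.h>

/* Hypothetical helper showing the pattern used in the hunk above. */
static unsigned long copy_user_to_page(struct page *page, unsigned int pg,
				       const void __user *user_data,
				       unsigned int len)
{
	void *vaddr;
	unsigned long unwritten;

	vaddr = kmap_local_page(page);
	pagefault_disable();	/* __copy_from_user_inatomic() must not fault */
	unwritten = __copy_from_user_inatomic(vaddr + pg, user_data, len);
	pagefault_enable();
	kunmap_local(vaddr);

	return unwritten;
}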
@@ -1082,7 +1082,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 		goto err_unlock;
 
 	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
-		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+		u32 *ptr = kmap_local_page(i915_gem_object_get_page(obj, n));
 
 		if (needs_flush & CLFLUSH_BEFORE)
 			drm_clflush_virt_range(ptr, PAGE_SIZE);
@@ -1090,12 +1090,12 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 		if (ptr[dword] != val) {
 			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
 			       n, dword, ptr[dword], val);
-			kunmap_atomic(ptr);
+			kunmap_local(ptr);
 			err = -EINVAL;
 			break;
 		}
 
-		kunmap_atomic(ptr);
+		kunmap_local(ptr);
 	}
 
 	i915_gem_object_finish_access(obj);
@@ -24,7 +24,6 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	void *map;
 	u32 *cpu;
 	int err;
 
@@ -34,8 +33,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
-	map = kmap_atomic(page);
-	cpu = map + offset_in_page(offset);
+	cpu = kmap_local_page(page) + offset_in_page(offset);
 
 	if (needs_clflush & CLFLUSH_BEFORE)
 		drm_clflush_virt_range(cpu, sizeof(*cpu));
@@ -45,7 +43,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	if (needs_clflush & CLFLUSH_AFTER)
 		drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-	kunmap_atomic(map);
+	kunmap_local(cpu);
 	i915_gem_object_finish_access(ctx->obj);
 
 out:
@@ -57,7 +55,6 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	void *map;
 	u32 *cpu;
 	int err;
 
@@ -67,15 +64,14 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
-	map = kmap_atomic(page);
-	cpu = map + offset_in_page(offset);
+	cpu = kmap_local_page(page) + offset_in_page(offset);
 
 	if (needs_clflush & CLFLUSH_BEFORE)
 		drm_clflush_virt_range(cpu, sizeof(*cpu));
 
 	*v = *cpu;
 
-	kunmap_atomic(map);
+	kunmap_local(cpu);
 	i915_gem_object_finish_access(ctx->obj);
 
 out:
@@ -489,12 +489,12 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
 
-		map = kmap_atomic(i915_gem_object_get_page(obj, n));
+		map = kmap_local_page(i915_gem_object_get_page(obj, n));
 		for (m = 0; m < DW_PER_PAGE; m++)
 			map[m] = value;
 		if (!has_llc)
 			drm_clflush_virt_range(map, PAGE_SIZE);
-		kunmap_atomic(map);
+		kunmap_local(map);
 	}
 
 	i915_gem_object_finish_access(obj);
@@ -520,7 +520,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map, m;
 
-		map = kmap_atomic(i915_gem_object_get_page(obj, n));
+		map = kmap_local_page(i915_gem_object_get_page(obj, n));
 		if (needs_flush & CLFLUSH_BEFORE)
 			drm_clflush_virt_range(map, PAGE_SIZE);
 
@@ -546,7 +546,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 		}
 
 out_unmap:
-		kunmap_atomic(map);
+		kunmap_local(map);
 		if (err)
 			break;
 	}
@@ -504,7 +504,7 @@ static int igt_dmabuf_export_vmap(void *arg)
 	}
 
 	if (memchr_inv(ptr, 0, dmabuf->size)) {
-		pr_err("Exported object not initialiased to zero!\n");
+		pr_err("Exported object not initialised to zero!\n");
 		err = -EINVAL;
 		goto out;
 	}
@@ -469,6 +469,9 @@
 #define XEHP_PSS_MODE2				MCR_REG(0x703c)
 #define   SCOREBOARD_STALL_FLUSH_CONTROL	REG_BIT(5)
 
+#define XEHP_PSS_CHICKEN			MCR_REG(0x7044)
+#define   FD_END_COLLECT			REG_BIT(5)
+
 #define GEN7_SC_INSTDONE			_MMIO(0x7100)
 #define GEN12_SC_INSTDONE_EXTRA			_MMIO(0x7104)
 #define GEN12_SC_INSTDONE_EXTRA2		_MMIO(0x7108)
@@ -777,6 +777,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
 
 	/* Wa_18019271663:dg2 */
 	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+
+	/* Wa_14019877138:dg2 */
+	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
 }
 
 static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
@@ -1343,16 +1343,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 
 	for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
 		u32 len = min_t(u32, size, PAGE_SIZE - offset);
-		void *vaddr;
 
 		if (idx > 0) {
 			idx--;
 			continue;
 		}
 
-		vaddr = kmap_atomic(page);
-		memcpy(dst, vaddr + offset, len);
-		kunmap_atomic(vaddr);
+		memcpy_from_page(dst, page, offset, len);
 
 		offset = 0;
 		dst += len;
@@ -1211,11 +1211,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	for (n = offset >> PAGE_SHIFT; remain; n++) {
 		int len = min(remain, PAGE_SIZE - x);
 
-		src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+		src = kmap_local_page(i915_gem_object_get_page(src_obj, n));
 		if (src_needs_clflush)
 			drm_clflush_virt_range(src + x, len);
 		memcpy(ptr, src + x, len);
-		kunmap_atomic(src);
+		kunmap_local(src);
 
 		ptr += len;
 		remain -= len;
@@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
 	 * tau4 = (4 | x) << y
 	 * but add 2 when doing the final right shift to account for units
 	 */
-	tau4 = ((1 << x_w) | x) << y;
+	tau4 = (u64)((1 << x_w) | x) << y;
 	/* val in hwmon interface units (millisec) */
 	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
 
@@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
 	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
 	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
 	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
-	tau4 = ((1 << x_w) | x) << y;
+	tau4 = (u64)((1 << x_w) | x) << y;
 	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
 
 	if (val > max_win)
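Both hwmon hunks fix the same pattern: ((1 << x_w) | x) is evaluated in 32-bit int arithmetic, so the << y shift can lose high bits before the result is ever widened to the u64 tau4. Casting the operand first makes the shift happen in 64 bits. A small self-contained illustration of the difference (the field values are made up for demonstration, not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int x = 2, x_w = 2, y = 30;	/* made-up field values */

	/* shift performed in 32-bit unsigned arithmetic: high bits wrap away */
	uint64_t truncated = ((1u << x_w) | x) << y;
	/* operand widened first: shift performed in 64-bit arithmetic */
	uint64_t widened = (uint64_t)((1u << x_w) | x) << y;

	printf("truncated=%llu widened=%llu\n",
	       (unsigned long long)truncated, (unsigned long long)widened);
	return 0;
}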
@@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_FENCE	 44
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
  * EXEC_OBJECT_CAPTURE.
  */
 #define I915_PARAM_HAS_EXEC_CAPTURE	 45
@@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy {
 	 * is accurate.
 	 *
 	 * The returned dword is split into two fields to indicate both
-	 * the engine classess on which the object is being read, and the
+	 * the engine classes on which the object is being read, and the
 	 * engine class on which it is currently being written (if any).
 	 *
 	 * The low word (bits 0:15) indicate if the object is being written
@@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise {
 	__u32 handle;
 
 	/* Advice: either the buffer will be needed again in the near future,
-	 * or wont be and could be discarded under memory pressure.
+	 * or won't be and could be discarded under memory pressure.
 	 */
 	__u32 madv;
 
@@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info {
 	 *	// enough to hold our array of engines. The kernel will fill out the
 	 *	// item.length for us, which is the number of bytes we need.
 	 *	//
-	 *	// Alternatively a large buffer can be allocated straight away enabling
+	 *	// Alternatively a large buffer can be allocated straightaway enabling
 	 *	// querying in one pass, in which case item.length should contain the
 	 *	// length of the provided buffer.
 	 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info {
 	 *	// Now that we allocated the required number of bytes, we call the ioctl
 	 *	// again, this time with the data_ptr pointing to our newly allocated
 	 *	// blob, which the kernel can then populate with info on all engines.
-	 *	item.data_ptr = (uintptr_t)&info,
+	 *	item.data_ptr = (uintptr_t)&info;
 	 *
 	 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 	 *	if (err) ...
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info {
 /**
  * struct drm_i915_engine_info
  *
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
  */
 struct drm_i915_engine_info {
 	/** @engine: Engine class and instance. */