If the move or clear operation somehow fails, and the memory underneath is not cleared, like when moving to lmem, then we currently fallback to memcpy or memset. However with small-BAR systems this fallback might no longer be possible. For now we use the set_wedged sledgehammer if we ever encounter such a scenario, and mark the object as borked to plug any holes where access to the memory underneath can happen. Add some basic selftests to exercise this. v2: - In the selftests make sure we grab the runtime pm around the reset. Also make sure we grab the reset lock before checking if the device is wedged, since the wedge might still be in-progress and hence the bit might not be set yet. - Don't wedge or put the object into an unknown state, if the request construction fails (or similar). Just returning an error and skipping the fallback should be safe here. - Make sure we wedge each gt. (Thomas) - Peek at the unknown_state in io_reserve, that way we don't have to export or hand roll the fault_wait_for_idle. (Thomas) - Add the missing read-side barriers for the unknown_state. (Thomas) - Some kernel-doc fixes. (Thomas) v3: - Tweak the ordering of the set_wedged, also add FIXME. Signed-off-by: Matthew Auld <matthew.auld@intel.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> Cc: Jon Bloomfield <jon.bloomfield@intel.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: Jordan Justen <jordan.l.justen@intel.com> Cc: Kenneth Graunke <kenneth@whitecape.org> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com> Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20220629174350.384910-11-matthew.auld@intel.com
43 lines · 1.1 KiB · C
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _I915_GEM_TTM_MOVE_H_
#define _I915_GEM_TTM_MOVE_H_

#include <linux/types.h>

#include "i915_selftest.h"

/* Forward declarations of TTM core types; this header only uses pointers. */
struct ttm_buffer_object;
struct ttm_operation_ctx;
struct ttm_place;
struct ttm_resource;
struct ttm_tt;

struct drm_i915_gem_object;
struct i915_refct_sgt;

/*
 * Notify the i915 side that @bo is about to be (or was) moved by TTM.
 * NOTE(review): presumably returns 0 on success or a negative errno —
 * confirm against the definition in i915_gem_ttm_move.c.
 */
int i915_ttm_move_notify(struct ttm_buffer_object *bo);

/*
 * Selftest-only fault-injection knobs (compiled out unless the
 * I915_SELFTEST machinery is enabled): force GPU-migration or
 * work-allocation failures, and ban the memcpy/memset fallback so the
 * small-BAR error paths (wedging + marking the object borked) can be
 * exercised — see the commit message introducing set_ban_memcpy.
 */
I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
							      bool work_allocation));
I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_ban_memcpy(bool ban));

/*
 * Copy the contents of @src into @dst. @allow_accel permits using the
 * GPU for the copy; @intr selects interruptible waits.
 * NOTE(review): error/return semantics not visible here — confirm in
 * the .c file before relying on them.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
			  struct drm_i915_gem_object *src,
			  bool allow_accel, bool intr);

/* Internal I915 TTM declarations and definitions below. */

/*
 * i915 backend for TTM's move callback: move @bo to @dst_mem, or report
 * a multihop placement via @hop. @evict indicates an eviction-driven move.
 */
int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
		  struct ttm_operation_ctx *ctx,
		  struct ttm_resource *dst_mem,
		  struct ttm_place *hop);

/* Fix up GEM read/write domains on @obj after a TTM move. */
void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj);

/* Fix up GEM object state (caching, flags) on @obj after a TTM move. */
void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj);

#endif /* _I915_GEM_TTM_MOVE_H_ */