drm/amdgpu: fix visible VRAM handling during faults
When we removed the hacky start code check we didn't take into
account that *all* VRAM pages need to be CPU accessible.

Clean up the code and unify the handling into a single helper which
checks if the whole resource is CPU accessible.

The only place where a partial check would make sense is during
eviction, but that is negligible.
Signed-off-by: Christian König <christian.koenig@amd.com>
Fixes: aed01a6804 ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2")
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
CC: stable@vger.kernel.org
parent 6fef2d4c00
commit a6ff969fe9
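The crux of the fix: the removed helper reported a BO as CPU accessible if *any* of its pages fell inside the visible window, while the fault and stats paths actually require *all* pages to be accessible. Below is a small standalone C model of the two checks; it is illustrative only, not kernel code (the kernel walks the blocks with an amdgpu_res_cursor, and struct range, any_page_visible() and all_pages_visible() are made-up names here):

/* Standalone model of the old vs. new visibility semantics; not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, size; };	/* one VRAM block of a resource */

/* Old semantics: true if ANY block starts inside the visible window. */
static bool any_page_visible(const struct range *r, int n, uint64_t visible)
{
	for (int i = 0; i < n; i++)
		if (r[i].start < visible)
			return true;
	return false;
}

/* New semantics: true only if EVERY block lies entirely inside the window. */
static bool all_pages_visible(const struct range *r, int n, uint64_t visible)
{
	for (int i = 0; i < n; i++)
		if (r[i].start + r[i].size > visible)
			return false;
	return true;
}

int main(void)
{
	/* A two-block BO straddling a 256 MiB visible window. */
	struct range blocks[] = {
		{ 0x0f000000, 0x01000000 },	/* fully below the boundary */
		{ 0x10000000, 0x01000000 },	/* fully above the boundary */
	};
	uint64_t visible = 256ull << 20;	/* 0x10000000 */

	printf("any page visible:  %d\n", any_page_visible(blocks, 2, visible));  /* 1 */
	printf("all pages visible: %d\n", all_pages_visible(blocks, 2, visible)); /* 0 */
	return 0;
}

With the old check, a BO like this would be reported as accessible even though a CPU fault could land in the invisible half; the unified helper rejects it instead.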
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -819,7 +819,7 @@ retry:
 
 	p->bytes_moved += ctx.bytes_moved;
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    amdgpu_bo_in_cpu_visible_vram(bo))
+	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 		p->bytes_moved_vis += ctx.bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		return r;
 
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
-	    amdgpu_bo_in_cpu_visible_vram(bo))
+	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 					     ctx.bytes_moved);
 	else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
 			  struct amdgpu_mem_stats *stats)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_resource *res = bo->tbo.resource;
 	uint64_t size = amdgpu_bo_size(bo);
 	struct drm_gem_object *obj;
 	unsigned int domain;
 	bool shared;
 
 	/* Abort if the BO doesn't currently have a backing store */
-	if (!bo->tbo.resource)
+	if (!res)
 		return;
 
 	obj = &bo->tbo.base;
 	shared = drm_gem_object_is_shared_for_memory_stats(obj);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+	domain = amdgpu_mem_type_to_domain(res->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		stats->vram += size;
-		if (amdgpu_bo_in_cpu_visible_vram(bo))
+		if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 			stats->visible_vram += size;
 		if (shared)
 			stats->vram_shared += size;
@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-	if (bo->resource->mem_type != TTM_PL_VRAM)
-		return 0;
-
-	if (amdgpu_bo_in_cpu_visible_vram(abo))
+	if (amdgpu_res_cpu_visible(adev, bo->resource))
 		return 0;
 
 	/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* this should never happen */
 	if (bo->resource->mem_type == TTM_PL_VRAM &&
-	    !amdgpu_bo_in_cpu_visible_vram(abo))
+	    !amdgpu_res_cpu_visible(adev, bo->resource))
 		return VM_FAULT_SIGBUS;
 
 	ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
  */
 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct dma_buf_attachment *attachment;
 	struct dma_buf *dma_buf;
 	const char *placement;
@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 
 	if (dma_resv_trylock(bo->tbo.base.resv)) {
 		unsigned int domain;
+
 		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 		switch (domain) {
 		case AMDGPU_GEM_DOMAIN_VRAM:
-			if (amdgpu_bo_in_cpu_visible_vram(bo))
+			if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 				placement = "VRAM VISIBLE";
 			else
 				placement = "VRAM";
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
-/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_res_cursor cursor;
-
-	if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
-		return false;
-
-	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
-	while (cursor.remaining) {
-		if (cursor.start < adev->gmc.visible_vram_size)
-			return true;
-
-		amdgpu_res_next(&cursor, cursor.size);
-	}
-
-	return false;
-}
-
 /**
  * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
  */
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 
 	} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 		   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-		   amdgpu_bo_in_cpu_visible_vram(abo)) {
+		   amdgpu_res_cpu_visible(adev, bo->resource)) {
 
 		/* Try evicting to the CPU inaccessible part of VRAM
 		 * first, but only set GTT as busy placement, so this
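Per the commit message, eviction is the one path where a partial check could still pay off; the hunk above accepts the slightly pessimistic whole-resource test instead. A rough sketch of the decision, heavily simplified from amdgpu_evict_flags() (pick_evict_target() and the enum are hypothetical names, and the real code expresses this through TTM placements with GTT as the busy fallback):

/* Simplified model of the eviction placement choice; not kernel code. */
#include <stdbool.h>

enum evict_target { EVICT_TO_INVISIBLE_VRAM, EVICT_TO_GTT };

/* BOs sitting in CPU-visible VRAM that don't need CPU access are pushed
 * to the invisible part of VRAM first; everything else goes to GTT. */
static enum evict_target pick_evict_target(bool vram_full_visible,
					   bool needs_cpu_access,
					   bool res_cpu_visible)
{
	if (!vram_full_visible && !needs_cpu_access && res_cpu_visible)
		return EVICT_TO_INVISIBLE_VRAM;
	return EVICT_TO_GTT;
}

int main(void)
{
	/* a fully visible BO without CPU_ACCESS_REQUIRED stays in VRAM */
	return pick_evict_target(false, false, true) == EVICT_TO_INVISIBLE_VRAM ? 0 : 1;
}

With the whole-resource check, a BO that is only partially visible no longer takes the invisible-VRAM shortcut, which the commit message calls out as a negligible loss.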
@@ -403,40 +403,55 @@ error:
 	return r;
 }
 
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
+ *
+ * Returns: true if the full resource is CPU visible, false otherwise.
+ */
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+			    struct ttm_resource *res)
+{
+	struct amdgpu_res_cursor cursor;
+
+	if (!res)
+		return false;
+
+	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+	    res->mem_type == AMDGPU_PL_PREEMPT)
+		return true;
+
+	if (res->mem_type != TTM_PL_VRAM)
+		return false;
+
+	amdgpu_res_first(res, 0, res->size, &cursor);
+	while (cursor.remaining) {
+		if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+			return false;
+		amdgpu_res_next(&cursor, cursor.size);
+	}
+
+	return true;
+}
+
 /*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
  *
  * Called by amdgpu_bo_move()
  */
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-			       struct ttm_resource *mem)
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+				struct ttm_resource *mem)
 {
-	u64 mem_size = (u64)mem->size;
-	struct amdgpu_res_cursor cursor;
-	u64 end;
-
-	if (mem->mem_type == TTM_PL_SYSTEM ||
-	    mem->mem_type == TTM_PL_TT)
-		return true;
-	if (mem->mem_type != TTM_PL_VRAM)
+	if (!amdgpu_res_cpu_visible(adev, mem))
 		return false;
 
-	amdgpu_res_first(mem, 0, mem_size, &cursor);
-	end = cursor.start + cursor.size;
-	while (cursor.remaining) {
-		amdgpu_res_next(&cursor, cursor.size);
-
-		if (!cursor.remaining)
-			break;
-
-		/* ttm_resource_ioremap only supports contiguous memory */
-		if (end != cursor.start)
-			return false;
-
-		end = cursor.start + cursor.size;
-	}
-
-	return end <= adev->gmc.visible_vram_size;
+	/* ttm_resource_ioremap only supports contiguous memory */
+	if (mem->mem_type == TTM_PL_VRAM &&
+	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+		return false;
+
+	return true;
 }
 
 /*
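The renamed amdgpu_res_copyable() layers one extra requirement on top of CPU visibility: ttm_resource_ioremap() only maps contiguous memory, so the memcpy fallback additionally demands TTM_PL_FLAG_CONTIGUOUS for VRAM. A compact standalone model of the combined test (struct resource_model, res_copyable() and the MT_* enum are made-up names; only the flag and the ioremap constraint come from the patch):

/* Standalone model of the reworked copyable check; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

enum mem_type { MT_SYSTEM, MT_TT, MT_VRAM };

struct resource_model {
	enum mem_type type;
	bool fully_cpu_visible;	/* outcome of the visibility walk above */
	bool contiguous;	/* stands in for TTM_PL_FLAG_CONTIGUOUS */
};

static bool res_copyable(const struct resource_model *res)
{
	if (!res->fully_cpu_visible)
		return false;

	/* ttm_resource_ioremap only supports contiguous memory */
	if (res->type == MT_VRAM && !res->contiguous)
		return false;

	return true;
}

int main(void)
{
	struct resource_model scattered_vram = { MT_VRAM, true, false };
	struct resource_model contig_vram    = { MT_VRAM, true, true  };

	printf("scattered: %d\n", res_copyable(&scattered_vram)); /* 0 */
	printf("contig:    %d\n", res_copyable(&contig_vram));    /* 1 */
	return 0;
}

Splitting the check this way also removes the old hand-rolled contiguity walk: the cursor loop lives only in amdgpu_res_cpu_visible(), and the call site below simply asks whether both resources are copyable.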
@@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 	if (r) {
 		/* Check that all memory is CPU accessible */
-		if (!amdgpu_mem_visible(adev, old_mem) ||
-		    !amdgpu_mem_visible(adev, new_mem)) {
+		if (!amdgpu_res_copyable(adev, old_mem) ||
+		    !amdgpu_res_copyable(adev, new_mem)) {
 			pr_err("Move buffer fallback to memcpy unavailable\n");
 			return r;
 		}
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
 				      uint64_t start);
 
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+			    struct ttm_resource *res);
+
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,