linux/drivers/gpu/drm/ttm/ttm_bo_util.c
Matthew Auld c63462184b drm/ttm: stop passing NULL fence in ttm_bo_move_sync_cleanup
If we hit the sync path, for example when skipping clearing for kernel internal
objects, or when falling back to CPU clearing as i915 does, we end up trying to
add a NULL fence. With some recent changes in this area that now results in a
NULL pointer dereference in dma_resv_add_fence():

<1>[    5.466383] BUG: kernel NULL pointer dereference, address: 0000000000000008
<1>[    5.466384] #PF: supervisor read access in kernel mode
<1>[    5.466385] #PF: error_code(0x0000) - not-present page
<6>[    5.466386] PGD 0 P4D 0
<4>[    5.466387] Oops: 0000 [#1] PREEMPT SMP NOPTI
<4>[    5.466389] CPU: 5 PID: 267 Comm: modprobe Not tainted 5.18.0-rc2-CI-CI_DRM_11481+ #1
<4>[    5.466391] RIP: 0010:dma_resv_add_fence+0x63/0x260
<4>[    5.466395] Code: 38 85 c0 0f 84 df 01 00 00 0f 88 e8 01 00 00 83 c0 01 0f 88 df 01 00 00 8b 05 35 89 10 01 49 8d 5e 68 85 c0 0f 85 45 01 00 00 <48> 8b 45 08 48 3d c0 a5 0a 82 0f 84 5c 01 00 00 48 3d 60 a5 0a 82
<4>[    5.466396] RSP: 0018:ffffc90000e974f8 EFLAGS: 00010202
<4>[    5.466397] RAX: 0000000000000001 RBX: ffff888123e88b28 RCX: 00000000ffffffff
<4>[    5.466398] RDX: 0000000000000001 RSI: ffffffff822e4f50 RDI: ffffffff8233f087
<4>[    5.466399] RBP: 0000000000000000 R08: ffff8881313dbc80 R09: 0000000000000001
<4>[    5.466399] R10: 0000000000000001 R11: 00000000da354294 R12: 0000000000000000
<4>[    5.466400] R13: ffff88810927dc58 R14: ffff888123e88ac0 R15: ffff88810a88d600
<4>[    5.466401] FS:  00007f5fa1193540(0000) GS:ffff88845d880000(0000) knlGS:0000000000000000
<4>[    5.466402] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4>[    5.466402] CR2: 0000000000000008 CR3: 0000000106dd6003 CR4: 00000000003706e0
<4>[    5.466403] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
<4>[    5.466404] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
<4>[    5.466404] Call Trace:
<4>[    5.466405]  <TASK>
<4>[    5.466406]  ttm_bo_move_accel_cleanup+0x62/0x270 [ttm]
<4>[    5.466411]  ? i915_rsgt_from_buddy_resource+0x185/0x1e0 [i915]
<4>[    5.466529]  i915_ttm_move+0xfd/0x430 [i915]
<4>[    5.466833]  ? dma_resv_reserve_fences+0x4e/0x320
<4>[    5.466836]  ? ttm_bo_add_move_fence.constprop.20+0xf7/0x140 [ttm]
<4>[    5.466841]  ttm_bo_handle_move_mem+0xa1/0x140 [ttm]
<4>[    5.466845]  ttm_bo_validate+0xee/0x160 [ttm]
<4>[    5.466849]  __i915_ttm_get_pages+0x4f/0x210 [i915]
<4>[    5.466976]  i915_ttm_get_pages+0xad/0x140 [i915]
<4>[    5.467094]  ____i915_gem_object_get_pages+0x32/0xf0 [i915]
<4>[    5.467210]  __i915_gem_object_get_pages+0x89/0xa0 [i915]
<4>[    5.467323]  i915_vma_get_pages+0x114/0x1d0 [i915]
<4>[    5.467446]  i915_vma_pin_ww+0xd3/0xa90 [i915]
<4>[    5.467570]  i915_vma_pin.constprop.10+0x119/0x1b0 [i915]
<4>[    5.467700]  ? __mutex_unlock_slowpath+0x3e/0x2b0
<4>[    5.467704]  intel_alloc_initial_plane_obj.isra.6+0x1a9/0x390 [i915]
<4>[    5.467833]  intel_crtc_initial_plane_config+0x83/0x340 [i915]

In the ttm_bo_move_sync_cleanup() case it seems we only really care
about calling ttm_bo_wait_free_node(), so let's instead just call that
directly.
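
In essence, a condensed sketch of the resulting helper (the full version is in
the file below): ttm_bo_move_sync_cleanup() now waits for the move and frees
the old node itself, then assigns the new resource, instead of routing a NULL
fence through ttm_bo_move_accel_cleanup():

	void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
				      struct ttm_resource *new_mem)
	{
		struct ttm_resource_manager *man =
			ttm_manager_type(bo->bdev, new_mem->mem_type);

		/* Wait for the pending move and release the old resource. */
		if (WARN_ON(ttm_bo_wait_free_node(bo, man->use_tt)))
			return;

		/* Only then assign the new resource; no fence is added. */
		ttm_bo_assign_mem(bo, new_mem);
	}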

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Nirmoy Das <nirmoy.das@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220411085603.58156-1-matthew.auld@intel.com
2022-04-12 14:35:53 +02:00

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/iosys-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to move out async under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
	}

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		kfree(fbo);
		return ret;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/**
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/**
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/**
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;

	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);
error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}