dma-buf: drop the _rcu postfix on function names v3
The functions can be called both in _rcu context and while holding the
reservation lock.

v2: add some kerneldoc as suggested by Daniel
v3: fix indentation

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-7-christian.koenig@amd.com
commit d3fae3b3da
parent 6b41323a26
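The point of the rename is that the _rcu suffix was misleading: these helpers were already safe to call whether or not the caller holds the reservation lock. A minimal sketch of the two calling styles, assuming only the renamed API from this patch; the driver_* wrappers are hypothetical and not part of the change:

/* Sketch only, not part of the patch. */
#include <linux/dma-resv.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/* Lockless caller: no dma_resv_lock() held, the helper synchronizes internally. */
static long driver_wait_idle_unlocked(struct dma_resv *resv)
{
	/* wait_all = true, intr = true */
	return dma_resv_wait_timeout(resv, true, true, MAX_SCHEDULE_TIMEOUT);
}

/* Locked caller: the very same helper is valid under the reservation lock. */
static bool driver_is_idle_locked(struct dma_resv *resv)
{
	bool signaled = false;

	if (dma_resv_trylock(resv)) {
		signaled = dma_resv_test_signaled(resv, true);
		dma_resv_unlock(resv);
	}
	return signaled;
}

The locked variant mirrors the i915 dma_resv_prune() hunk below, which does its test-and-clear entirely under a trylock.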
@@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	long ret;

 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout_rcu(resv, write, true,
-					MAX_SCHEDULE_TIMEOUT);
+	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;

@@ -396,7 +396,7 @@ retry:
 EXPORT_SYMBOL(dma_resv_copy_fences);

 /**
- * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -408,10 +408,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned int *pshared_count,
-			    struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned int *pshared_count,
+			struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -494,23 +493,24 @@ unlock:
 	*pshared = shared;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);

 /**
- * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
  * @intr: if true, do interruptible wait
  * @timeout: timeout value in jiffies or zero to return immediately
  *
+ * Callers are not required to hold specific locks, but maybe hold
+ * dma_resv_lock() already
  * RETURNS
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than zer on success.
  */
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
-			       bool wait_all, bool intr,
-			       unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout)
 {
 	long ret = timeout ? timeout : 1;
 	unsigned int seq, shared_count;
@@ -582,7 +582,7 @@ unlock_retry:
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);


 static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
@@ -602,16 +602,18 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 }

 /**
- * dma_resv_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
+ * dma_resv_test_signaled - Test if a reservation object's fences have been
+ * signaled.
  * @obj: the reservation object
  * @test_all: if true, test all fences, otherwise only test the exclusive
  * fence
  *
+ * Callers are not required to hold specific locks, but maybe hold
+ * dma_resv_lock() already
  * RETURNS
  * true if all fences signaled, else false
  */
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
 	unsigned int seq, shared_count;
 	int ret;
@@ -660,7 +662,7 @@ retry:
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

 #if IS_ENABLED(CONFIG_LOCKDEP)
 static int __init dma_resv_lockdep(void)
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}

-	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
-				    &work->shared_count,
-				    &work->shared);
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+				&work->shared_count, &work->shared);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to get fences for buffer\n");
 		goto unpin;
@@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
 	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
 		return 0;

-	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences(obj, NULL, &count, &fences);
 	if (r)
 		return r;

@@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
-					timeout);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);

 	/* ret == 0 means not signaled,
 	 * ret > 0 means signaled
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	unsigned count;
 	int r;

-	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;

@@ -156,8 +156,7 @@ fallback:
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout_rcu(resv, true, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }

@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,

 	mmu_interval_set_seq(mni, cur_seq);

-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -756,8 +756,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}

-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;

@@ -1126,9 +1126,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;

 	if (direct) {
-		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
-					      true, false,
-					      msecs_to_jiffies(10));
+		r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(10));
 		if (r == 0)
 			r = -ETIMEDOUT;
 		if (r < 0)
@@ -2022,13 +2022,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	unsigned i, shared_count;
 	int r;

-	r = dma_resv_get_fences_rcu(resv, &excl,
-				    &shared_count, &shared);
+	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		dma_resv_wait_timeout_rcu(resv, true, false,
-					  MAX_SCHEDULE_TIMEOUT);
+		dma_resv_wait_timeout(resv, true, false,
+				      MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -2640,7 +2639,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;

 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
 		return false;

 	/* Try to block ongoing updates */
@@ -2820,8 +2819,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					    true, true, timeout);
+	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+					true, timeout);
 	if (timeout <= 0)
 		return timeout;

@@ -8400,9 +8400,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
-					      false,
-					      msecs_to_jiffies(5000));
+		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
 			DRM_ERROR("Waiting for fences timed out!");

@@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}

-	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
-					true, timeout);
+	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
 	else if (ret > 0)
@@ -1380,7 +1379,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 		return drm_gem_fence_array_add(fence_array, fence);
 	}

-	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
-				      &fence_count, &fences);
+	ret = dma_resv_get_fences(obj->resv, NULL,
+				  &fence_count, &fences);
 	if (ret || !fence_count)
 		return ret;
@@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}

 	if (op & ETNA_PREP_NOSYNC) {
-		if (!dma_resv_test_signaled_rcu(obj->resv,
-						write))
+		if (!dma_resv_test_signaled(obj->resv, write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

-		ret = dma_resv_wait_timeout_rcu(obj->resv,
-						write, true, remain);
+		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}
@@ -189,9 +189,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;

 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
-						      &bo->nr_shared,
-						      &bo->shared);
+			ret = dma_resv_get_fences(robj, &bo->excl,
+						  &bo->nr_shared,
+						  &bo->shared);
 			if (ret)
 				return ret;
 		} else {
@@ -10,7 +10,7 @@
 void dma_resv_prune(struct dma_resv *resv)
 {
 	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled_rcu(resv, true))
+		if (dma_resv_test_signaled(resv, true))
 			dma_resv_add_excl_fence(resv, NULL);
 		dma_resv_unlock(resv);
 	}
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
 	if (DBG_FORCE_RELOC)
 		return false;

-	return !dma_resv_test_signaled_rcu(vma->resv, true);
+	return !dma_resv_test_signaled(vma->resv, true);
 }

 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;

 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(obj->base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		unsigned int count, i;
 		int ret;

-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;

@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;

-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;

@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;

-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;

@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;

-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;

@@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;

-	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
-					true, remain);
+	ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 	else if (ret < 0)
@@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);

-	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
-					 no_wait ? 0 : 30 * HZ);
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+				     no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
 	else if (lret > 0)
@@ -312,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;

-	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
-					true, timeout);
+	ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;

@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;

@@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);

-	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
@@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);

-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}

-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);

@@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;

-	if (dma_resv_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -308,8 +308,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);

-		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
-						 30 * HZ);
+		lret = dma_resv_wait_timeout(resv, true, interruptible,
+					     30 * HZ);

 		if (lret < 0)
 			return lret;
@@ -411,8 +411,8 @@ static void ttm_bo_release(struct kref *kref)
 			/* Last resort, if we fail to allocate memory for the
 			 * fences block for the BO to become idle
 			 */
-			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
-						  30 * HZ);
+			dma_resv_wait_timeout(bo->base.resv, true, false,
+					      30 * HZ);
 		}

 		if (bo->bdev->funcs->release_notify)
@@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
 		ttm_mem_io_free(bdev, bo->resource);
 	}

-	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+	if (!dma_resv_test_signaled(bo->base.resv, true) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -1094,14 +1094,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;

 	if (no_wait) {
-		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+		if (dma_resv_test_signaled(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}

-	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
-					    interruptible, timeout);
+	timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+					timeout);
 	if (timeout < 0)
 		return timeout;

@@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,

 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!dma_resv_test_signaled_rcu(resv,
-					arg->flags & VGEM_FENCE_WRITE)) {
+	if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
 		ret = -EBUSY;
 		goto err_fence;
 	}
@@ -451,10 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;

 	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
-		ret = dma_resv_test_signaled_rcu(obj->resv, true);
+		ret = dma_resv_test_signaled(obj->resv, true);
 	} else {
-		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
-						timeout);
+		ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
 	}
 	if (ret == 0)
 		ret = -EBUSY;
@@ -743,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 	if (flags & drm_vmw_synccpu_allow_cs) {
 		long lret;

-		lret = dma_resv_wait_timeout_rcu
-			(bo->base.resv, true, true,
-			nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+					     nonblock ? 0 :
+					     MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
 		else if (lret < 0)
@@ -271,19 +271,12 @@ void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned *pshared_count, struct dma_fence ***pshared);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-			       unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);

 #endif /* _LINUX_RESERVATION_H */