dma-buf: nuke DMA_FENCE_TRACE macros v2
Only the DRM GPU scheduler, radeon and amdgpu were using them, and they
depend on a non-existent config option to actually emit some code.

v2: keep the signal path as is for now

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210818105443.1578-1-christian.koenig@amd.com
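For context on why these call sites were safe to delete outright: DMA_FENCE_TRACE only produced output when CONFIG_DMA_FENCE_TRACE was enabled, and no Kconfig entry defines that symbol, so IS_ENABLED() evaluated to a constant 0 and the compiler discarded every use. A minimal standalone sketch of that dead-code pattern (TRACE_ENABLED and DEMO_FENCE_TRACE are illustrative stand-ins, not kernel names; the kernel's IS_ENABLED() is simplified to an #ifdef here):

/* Standalone sketch (not kernel code) of why DMA_FENCE_TRACE was dead:
 * with no Kconfig entry for CONFIG_DMA_FENCE_TRACE, the condition below
 * is a compile-time constant 0 and the printout is compiled away. */
#include <stdio.h>

#ifdef CONFIG_DMA_FENCE_TRACE          /* never defined anywhere */
#define TRACE_ENABLED 1
#else
#define TRACE_ENABLED 0
#endif

#define DEMO_FENCE_TRACE(fmt, ...)                     \
	do {                                           \
		if (TRACE_ENABLED)                     \
			printf(fmt, ##__VA_ARGS__);    \
	} while (0)

int main(void)
{
	/* constant-false branch: the compiler elides the printf entirely */
	DEMO_FENCE_TRACE("f %llu#%llu: signaled\n", 1ULL, 42ULL);
	return 0;
}

Built as-is the program prints nothing; only an explicit -DCONFIG_DMA_FENCE_TRACE at compile time would enable the branch, a switch the kernel never provided.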
commit d72277b6c3
parent 3605eacc8a
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -246,7 +246,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
 	struct amdgpu_device *adev = ring->adev;
 	uint32_t seq, last_seq;
-	int r;
 
 	do {
 		last_seq = atomic_read(&ring->fence_drv.last_seq);
@@ -278,12 +277,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
 		if (!fence)
 			continue;
 
-		r = dma_fence_signal(fence);
-		if (!r)
-			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
-		else
-			BUG();
-
+		dma_fence_signal(fence);
 		dma_fence_put(fence);
 		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -639,8 +633,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
 
-	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
-
 	return true;
 }
 
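A note on the amdgpu hunk above: per the dma_fence_signal() kernel-doc, the function returns 0 on success and a negative error when the fence was already signaled. With the trace macro compiled out, the removed construct had effectively reduced to "if (dma_fence_signal(fence)) BUG();", so dropping the return-value check also drops that BUG() on an already-signaled fence; the dma_fence_signal() call itself stays, consistent with the v2 note about keeping the signal path as is.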
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
 	 */
 	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
 	if (seq >= fence->seq) {
-		int ret = dma_fence_signal_locked(&fence->base);
-
-		if (!ret)
-			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
-		else
-			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
-
+		dma_fence_signal_locked(&fence->base);
 		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
 		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
 		dma_fence_put(&fence->base);
-	} else
-		DMA_FENCE_TRACE(&fence->base, "pending\n");
+	}
 	return 0;
 }
 
@@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f)
 	fence->fence_wake.func = radeon_fence_check_signaled;
 	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
 	dma_fence_get(f);
-
-	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
 	return true;
 }
 
@@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 		return true;
 
 	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
-		int ret;
-
-		ret = dma_fence_signal(&fence->base);
-		if (!ret)
-			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+		dma_fence_signal(&fence->base);
 		return true;
 	}
 	return false;
@@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 	long r;
-	int r_sig;
 
 	/*
 	 * This function should not be called on !radeon fences.
@@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
 		return r;
 	}
 
-	r_sig = dma_fence_signal(&fence->base);
-	if (!r_sig)
-		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+	dma_fence_signal(&fence->base);
 	return r;
 }
 
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void)
 
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
 {
-	int ret = dma_fence_signal(&fence->scheduled);
-
-	if (!ret)
-		DMA_FENCE_TRACE(&fence->scheduled,
-				"signaled from irq context\n");
-	else
-		DMA_FENCE_TRACE(&fence->scheduled,
-				"was already signaled\n");
+	dma_fence_signal(&fence->scheduled);
 }
 
 void drm_sched_fence_finished(struct drm_sched_fence *fence)
 {
-	int ret = dma_fence_signal(&fence->finished);
-
-	if (!ret)
-		DMA_FENCE_TRACE(&fence->finished,
-				"signaled from irq context\n");
-	else
-		DMA_FENCE_TRACE(&fence->finished,
-				"was already signaled\n");
+	dma_fence_signal(&fence->finished);
 }
 
 static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -590,26 +590,4 @@ struct dma_fence *dma_fence_get_stub(void);
 struct dma_fence *dma_fence_allocate_private_stub(void);
 u64 dma_fence_context_alloc(unsigned num);
 
-#define DMA_FENCE_TRACE(f, fmt, args...) \
-	do {								\
-		struct dma_fence *__ff = (f);				\
-		if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))			\
-			pr_info("f %llu#%llu: " fmt,			\
-				__ff->context, __ff->seqno, ##args);	\
-	} while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
-	do {								\
-		struct dma_fence *__ff = (f);				\
-		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
-			##args);					\
-	} while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
-	do {								\
-		struct dma_fence *__ff = (f);				\
-		pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
-			##args);					\
-	} while (0)
-
 #endif /* __LINUX_DMA_FENCE_H */
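Removing these macros does not remove fence observability: dma_fence_signal() and its variants already emit the dedicated dma_fence tracepoints (e.g. dma_fence_signaled, defined in include/trace/events/dma_fence.h), which can be toggled at runtime through tracefs under events/dma_fence/ — unlike the compile-time-dead pr_info() path deleted here.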