a61acbbe9c
Previously the (non-fd) fence returned from the submit ioctl was a raw seqno, which is scoped to the ring. From a UABI standpoint, however, the ioctls related to seqno fences all specify a submitqueue. We can take advantage of that to replace the seqno fences with a cyclic idr handle.

This is in preparation for moving to the drm scheduler, at which point the submit ioctl will return after queuing the submit job to the scheduler, but before the submit is written into the ring (and therefore before a ring seqno has been assigned). That means we need to replace the dma_fence that userspace may need to wait on with a scheduler fence.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20210728010632.2633470-8-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
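As a sketch of the handle scheme described above (hedged: the names queue->fence_idr, queue->idr_lock, submit->user_fence, and fence_id are assumptions patterned on the msm submitqueue code, not quoted from this file), the submit path would allocate a cyclic per-queue handle for the fence, and the wait ioctl would resolve it back. idr_alloc_cyclic() and idr_find() are the stock kernel idr API; locking around the allocation is elided for brevity:

	/* at submit time: return a submitqueue-scoped handle instead of a
	 * raw ring seqno (idr locking elided; GFP_KERNEL may not be used
	 * under a spinlock) */
	submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
			submit->user_fence, 0, INT_MAX, GFP_KERNEL);
	if (submit->fence_id < 0)
		return submit->fence_id;

	/* in the wait ioctl: look the handle back up and take a reference */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);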
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-fence.h>

#include "msm_drv.h"
#include "msm_fence.h"

struct msm_fence_context *
msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
		const char *name)
{
	struct msm_fence_context *fctx;

	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return ERR_PTR(-ENOMEM);

	fctx->dev = dev;
	strncpy(fctx->name, name, sizeof(fctx->name));
	fctx->context = dma_fence_context_alloc(1);
	fctx->fenceptr = fenceptr;
	spin_lock_init(&fctx->spinlock);

	return fctx;
}

void msm_fence_context_free(struct msm_fence_context *fctx)
{
	kfree(fctx);
}

static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
	/*
	 * Note: Check completed_fence first, as fenceptr is in a write-combine
	 * mapping, so it will be more expensive to read.
	 */
	return (int32_t)(fctx->completed_fence - fence) >= 0 ||
		(int32_t)(*fctx->fenceptr - fence) >= 0;
}
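
The signed-difference trick above is what makes the 32-bit seqno comparison robust across wraparound: as long as the two values are within 2^31 of each other, (int32_t)(a - b) >= 0 means "a is at or ahead of b". A minimal standalone illustration, with hypothetical values and in plain userspace C rather than kernel code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t completed = 3;        /* counter wrapped past UINT32_MAX */
		uint32_t fence = 0xfffffffe;   /* issued just before the wrap */

		/* naive unsigned compare: wrong, prints 0 */
		printf("%d\n", completed >= fence);

		/* signed difference: right, prints 1, since
		 * (int32_t)(3 - 0xfffffffe) == 5 */
		printf("%d\n", (int32_t)(completed - fence) >= 0);

		return 0;
	}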

/* called from workqueue */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
	spin_lock(&fctx->spinlock);
	fctx->completed_fence = max(fence, fctx->completed_fence);
	spin_unlock(&fctx->spinlock);
}

struct msm_fence {
	struct dma_fence base;
	struct msm_fence_context *fctx;
};

static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
	return container_of(fence, struct msm_fence, base);
}

static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
	return "msm";
}

static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
	struct msm_fence *f = to_msm_fence(fence);
	return f->fctx->name;
}

static bool msm_fence_signaled(struct dma_fence *fence)
{
	struct msm_fence *f = to_msm_fence(fence);
	return fence_completed(f->fctx, f->base.seqno);
}

static const struct dma_fence_ops msm_fence_ops = {
	.get_driver_name = msm_fence_get_driver_name,
	.get_timeline_name = msm_fence_get_timeline_name,
	.signaled = msm_fence_signaled,
};

struct dma_fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
	struct msm_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	f->fctx = fctx;

	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
		       fctx->context, ++fctx->last_fence);

	return &f->base;
}
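
For context, a hedged sketch of how a caller would be expected to use this API. In the real driver the callers live in the ringbuffer and submit/retire paths; "ring" and its memptrs are hypothetical stand-ins here:

	struct msm_fence_context *fctx;
	struct dma_fence *fence;

	/* one fence timeline per ring; fenceptr points at the seqno the
	 * GPU writes back (e.g. a slot in a shared memptrs page) */
	fctx = msm_fence_context_alloc(dev, &ring->memptrs->fence, "ring0");
	if (IS_ERR(fctx))
		return PTR_ERR(fctx);

	/* one dma_fence per submit, allocated on that timeline */
	fence = msm_fence_alloc(fctx);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* on retire: advance the CPU-side shadow of the completed seqno so
	 * fence_completed() can usually skip the expensive WC read, then
	 * signal the fence */
	msm_update_fence(fctx, fence->seqno);
	dma_fence_signal(fence);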