Previously the (non-fd) fence returned from submit ioctl was a raw seqno, which is scoped to the ring. But from UABI standpoint, the ioctls related to seqno fences all specify a submitqueue. We can take advantage of that to replace the seqno fences with a cyclic idr handle. This is in preparation for moving to drm scheduler, at which point the submit ioctl will return after queuing the submit job to the scheduler, but before the submit is written into the ring (and therefore before a ring seqno has been assigned). Which means we need to replace the dma_fence that userspace may need to wait on with a scheduler fence. Signed-off-by: Rob Clark <robdclark@chromium.org> Acked-by: Christian König <christian.koenig@amd.com> Link: https://lore.kernel.org/r/20210728010632.2633470-8-robdclark@gmail.com Signed-off-by: Rob Clark <robdclark@chromium.org>
64 lines
1.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_FENCE_H__
#define __MSM_FENCE_H__

#include "msm_drv.h"

/**
|
|
* struct msm_fence_context - fence context for gpu
|
|
*
|
|
* Each ringbuffer has a single fence context, with the GPU writing an
|
|
* incrementing fence seqno at the end of each submit
|
|
*/
|
|
struct msm_fence_context {
|
|
struct drm_device *dev;
|
|
/** name: human readable name for fence timeline */
|
|
char name[32];
|
|
/** context: see dma_fence_context_alloc() */
|
|
unsigned context;
|
|
|
|
/**
|
|
* last_fence:
|
|
*
|
|
* Last assigned fence, incremented each time a fence is created
|
|
* on this fence context. If last_fence == completed_fence,
|
|
* there is no remaining pending work
|
|
*/
|
|
uint32_t last_fence;
|
|
|
|
/**
|
|
* completed_fence:
|
|
*
|
|
* The last completed fence, updated from the CPU after interrupt
|
|
* from GPU
|
|
*/
|
|
uint32_t completed_fence;
|
|
|
|
/**
|
|
* fenceptr:
|
|
*
|
|
* The address that the GPU directly writes with completed fence
|
|
* seqno. This can be ahead of completed_fence. We can peek at
|
|
* this to see if a fence has already signaled but the CPU hasn't
|
|
* gotten around to handling the irq and updating completed_fence
|
|
*/
|
|
volatile uint32_t *fenceptr;
|
|
|
|
spinlock_t spinlock;
|
|
};
struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
|
|
volatile uint32_t *fenceptr, const char *name);
|
|
void msm_fence_context_free(struct msm_fence_context *fctx);
|
|
|
|
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
|
|
|
|
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
|
|
|
|
#endif