drm/scheduler: modify args of drm_sched_entity_init

Replace the single run queue with a list of run queues, and remove the
sched argument, as the scheduler is already part of the run queue itself.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Nayan Deshmukh 2018-07-13 15:21:14 +05:30 committed by Alex Deucher
parent 8dc9fbbf27
commit aa16b6c6b4
11 changed files with 33 additions and 33 deletions

View File

@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (ring == &adev->gfx.kiq.ring)
continue;
r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, &ctx->guilty);
r = drm_sched_entity_init(&ctx->rings[i].entity,
&rq, 1, &ctx->guilty);
if (r)
goto failed;
}

View File

@@ -1918,8 +1918,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
rq, NULL);
r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
r);

View File

@@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
ring = &adev->uvd.inst[j].ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
rq, NULL);
r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
return r;

View File

@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
ring = &adev->vce.ring[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
rq, NULL);
r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;

View File

@@ -2564,8 +2564,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
r = drm_sched_entity_init(&ring->sched, &vm->entity,
rq, NULL);
r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
if (r)
return r;

View File

@@ -430,8 +430,8 @@ static int uvd_v6_0_sw_init(void *handle)
struct drm_sched_rq *rq;
ring = &adev->uvd.inst->ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
rq, NULL);
r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
&rq, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
return r;

View File

@@ -432,8 +432,8 @@ static int uvd_v7_0_sw_init(void *handle)
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
ring = &adev->uvd.inst[j].ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
rq, NULL);
r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
&rq, 1, NULL);
if (r) {
DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
return r;

View File

@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
struct drm_sched_rq *rq;
if (gpu) {
drm_sched_entity_init(&gpu->sched,
&ctx->sched_entity[i],
&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
NULL);
rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
drm_sched_entity_init(&ctx->sched_entity[i],
&rq, 1, NULL);
}
}

View File

@@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
* drm_sched_entity_init - Init a context entity used by scheduler when
* submit to HW ring.
*
* @sched: scheduler instance
* @entity: scheduler entity to init
* @rq: the run queue this entity belongs
* @rq_list: the list of run queues on which jobs from this
* entity can be submitted
* @num_rq_list: number of run queues in rq_list
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
* Note: the rq_list should have at least one element to schedule
* the entity
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
struct drm_sched_rq *rq,
int drm_sched_entity_init(struct drm_sched_entity *entity,
struct drm_sched_rq **rq_list,
unsigned int num_rq_list,
atomic_t *guilty)
{
if (!(sched && entity && rq))
if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = rq;
entity->sched = sched;
entity->rq = rq_list[0];
entity->sched = rq_list[0]->sched;
entity->guilty = guilty;
entity->last_scheduled = NULL;

View File

@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
struct drm_sched_rq *rq;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
drm_sched_entity_init(&v3d->queue[i].sched,
&v3d_priv->sched_entity[i],
&v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
NULL);
rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
}
file->driver_priv = v3d_priv;

View File

@@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
struct drm_sched_rq *rq,
int drm_sched_entity_init(struct drm_sched_entity *entity,
struct drm_sched_rq **rq_list,
unsigned int num_rq_list,
atomic_t *guilty);
long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity, long timeout);