drm/amdgpu/sdma5: add mes support for sdma ib test
Add MES support for sdma ib test.

Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
@ -1098,7 +1098,23 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
|||||||
long r;
|
long r;
|
||||||
u32 tmp = 0;
|
u32 tmp = 0;
|
||||||
u64 gpu_addr;
|
u64 gpu_addr;
|
||||||
|
volatile uint32_t *cpu_ptr = NULL;
|
||||||
|
|
||||||
|
tmp = 0xCAFEDEAD;
|
||||||
|
memset(&ib, 0, sizeof(ib));
|
||||||
|
|
||||||
|
if (ring->is_mes_queue) {
|
||||||
|
uint32_t offset = 0;
|
||||||
|
offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
|
||||||
|
ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
|
||||||
|
ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
|
||||||
|
|
||||||
|
offset = amdgpu_mes_ctx_get_offs(ring,
|
||||||
|
AMDGPU_MES_CTX_PADDING_OFFS);
|
||||||
|
gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
|
||||||
|
cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
|
||||||
|
*cpu_ptr = tmp;
|
||||||
|
} else {
|
||||||
r = amdgpu_device_wb_get(adev, &index);
|
r = amdgpu_device_wb_get(adev, &index);
|
||||||
if (r) {
|
if (r) {
|
||||||
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
|
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
|
||||||
@ -1106,15 +1122,15 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
|||||||
}
|
}
|
||||||
|
|
||||||
gpu_addr = adev->wb.gpu_addr + (index * 4);
|
gpu_addr = adev->wb.gpu_addr + (index * 4);
|
||||||
tmp = 0xCAFEDEAD;
|
|
||||||
adev->wb.wb[index] = cpu_to_le32(tmp);
|
adev->wb.wb[index] = cpu_to_le32(tmp);
|
||||||
memset(&ib, 0, sizeof(ib));
|
|
||||||
r = amdgpu_ib_get(adev, NULL, 256,
|
r = amdgpu_ib_get(adev, NULL, 256,
|
||||||
AMDGPU_IB_POOL_DIRECT, &ib);
|
AMDGPU_IB_POOL_DIRECT, &ib);
|
||||||
if (r) {
|
if (r) {
|
||||||
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
|
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
|
||||||
goto err0;
|
goto err0;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
|
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
|
||||||
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
|
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
|
||||||
@ -1140,7 +1156,12 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
|||||||
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
||||||
goto err1;
|
goto err1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (ring->is_mes_queue)
|
||||||
|
tmp = le32_to_cpu(*cpu_ptr);
|
||||||
|
else
|
||||||
tmp = le32_to_cpu(adev->wb.wb[index]);
|
tmp = le32_to_cpu(adev->wb.wb[index]);
|
||||||
|
|
||||||
if (tmp == 0xDEADBEEF)
|
if (tmp == 0xDEADBEEF)
|
||||||
r = 0;
|
r = 0;
|
||||||
else
|
else
|
||||||
@ -1150,6 +1171,7 @@ err1:
|
|||||||
amdgpu_ib_free(adev, &ib, NULL);
|
amdgpu_ib_free(adev, &ib, NULL);
|
||||||
dma_fence_put(f);
|
dma_fence_put(f);
|
||||||
err0:
|
err0:
|
||||||
|
if (!ring->is_mes_queue)
|
||||||
amdgpu_device_wb_free(adev, index);
|
amdgpu_device_wb_free(adev, index);
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user