drm/amdgpu: move vmhub out of amdgpu_ring_funcs (v4)
It looks better to place this field in the ring structure. Also drop the
repeated ring funcs definitions when there is no difference except for the
vmhub field.

v2: rename the field to vm_hub like others (Le)
v3: apply the changes to new ip blocks (Hawking)
v4: fix vcn sw ring (Alex)

Signed-off-by: Le Ma <le.ma@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b805d8d785
commit 0530553ba8
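For orientation before the diff: the patch turns a per-ring property that had been parked in the shared, read-only funcs table into ordinary per-ring state. Below is a minimal, self-contained sketch of that pattern; the struct, constant, and variable names are simplified stand-ins for illustration, not the driver's actual definitions. The point it shows: when a const ops table carries instance data, every distinct value forces a duplicated table (as the old `sdma_v4_0_ring_funcs_2nd_mmhub` did), whereas a field on the instance is simply assigned at init time.

```c
#include <stdio.h>

/* Shared, const ops table: one per ring *type*. Keeping per-instance
 * data (like a vmhub index) here forces duplicate tables. */
struct ring_funcs {
	const char *type;
	/* unsigned vmhub;  <-- the kind of field this patch removes */
};

/* Per-ring state: the natural home for the hub index. */
struct ring {
	const struct ring_funcs *funcs;
	unsigned vm_hub;	/* set once at sw_init time */
};

static const struct ring_funcs sdma_funcs = { .type = "sdma" };

enum { GFXHUB_0, MMHUB_0, MMHUB_1 };	/* illustrative hub ids */

int main(void)
{
	struct ring rings[8];

	for (unsigned i = 0; i < 8; i++) {
		rings[i].funcs = &sdma_funcs;	/* one table serves all */
		/* Arcturus-style split: instances 5..7 sit behind MMHUB_1 */
		rings[i].vm_hub = (i >= 5) ? MMHUB_1 : MMHUB_0;
	}

	for (unsigned i = 0; i < 8; i++)
		printf("%s%u uses hub %u\n",
		       rings[i].funcs->type, i, rings[i].vm_hub);
	return 0;
}
```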
@@ -305,6 +305,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
 	ring->doorbell_index = adev->doorbell_index.kiq;
+	ring->vm_hub = AMDGPU_GFXHUB_0;
 
 	r = amdgpu_gfx_kiq_acquire(adev, ring);
 	if (r)
@@ -554,7 +554,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->num_rings; ++i) {
 		ring = adev->rings[i];
-		vmhub = ring->funcs->vmhub;
+		vmhub = ring->vm_hub;
 
 		if (ring == &adev->mes.ring)
 			continue;
@@ -570,7 +570,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
 
 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
-			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+			 ring->name, ring->vm_inv_eng, ring->vm_hub);
 	}
 
 	return 0;
@@ -267,7 +267,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vmid)
-			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
+			amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
 		amdgpu_ring_undo(ring);
 		return r;
 	}
@@ -202,7 +202,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence **fences;
 	unsigned i;
@@ -277,7 +277,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 				     struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	bool needs_flush = vm->use_cpu_for_update;
@@ -338,7 +338,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	uint64_t updates = amdgpu_vm_tlb_seq(vm);
@@ -398,7 +398,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		     struct amdgpu_job *job, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vmid *idle = NULL;
 	struct amdgpu_vmid *id = NULL;
@@ -165,7 +165,6 @@ struct amdgpu_ring_funcs {
 	bool			support_64bit_ptrs;
 	bool			no_user_fence;
 	bool			secure_submission_supported;
-	unsigned		vmhub;
 	unsigned		extra_dw;
 
 	/* ring read/write ptr handling */
@@ -275,6 +274,7 @@ struct amdgpu_ring {
 	unsigned		cond_exe_offs;
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
+	unsigned		vm_hub;
 	unsigned		vm_inv_eng;
 	struct dma_fence	*vmid_wait;
 	bool			has_compute_vm_bug;
@@ -233,7 +233,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 			   __entry->pasid = vm->pasid;
 			   __assign_str(ring, ring->name);
 			   __entry->vmid = job->vmid;
-			   __entry->vm_hub = ring->funcs->vmhub,
+			   __entry->vm_hub = ring->vm_hub,
 			   __entry->pd_addr = job->vm_pd_addr;
 			   __entry->needs_flush = job->vm_needs_flush;
 			   ),
@@ -427,7 +427,7 @@ TRACE_EVENT(amdgpu_vm_flush,
 	    TP_fast_assign(
 			   __assign_str(ring, ring->name);
 			   __entry->vmid = vmid;
-			   __entry->vm_hub = ring->funcs->vmhub;
+			   __entry->vm_hub = ring->vm_hub;
 			   __entry->pd_addr = pd_addr;
 			   ),
 	    TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
@@ -483,7 +483,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	if (job->vmid == 0)
@@ -517,7 +517,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 		    bool need_pipe_sync)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
 	bool spm_update_needed = job->spm_update_needed;
@@ -4461,6 +4461,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
 	else
 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
+	ring->vm_hub = AMDGPU_GFXHUB_0;
 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
@@ -4489,6 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
 				+ (ring_id * GFX10_MEC_HPD_SIZE);
+	ring->vm_hub = AMDGPU_GFXHUB_0;
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -9249,7 +9251,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v10_0_ring_get_rptr_gfx,
 	.get_wptr = gfx_v10_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v10_0_ring_set_wptr_gfx,
@@ -9304,7 +9305,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
 	.align_mask = 0xff,
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v10_0_ring_get_rptr_compute,
 	.get_wptr = gfx_v10_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v10_0_ring_set_wptr_compute,
@@ -9340,7 +9340,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
 	.align_mask = 0xff,
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v10_0_ring_get_rptr_compute,
 	.get_wptr = gfx_v10_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v10_0_ring_set_wptr_compute,
@@ -866,6 +866,7 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
 	else
 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
+	ring->vm_hub = AMDGPU_GFXHUB_0;
 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
@@ -896,6 +897,7 @@ static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
 				+ (ring_id * GFX11_MEC_HPD_SIZE);
+	ring->vm_hub = AMDGPU_GFXHUB_0;
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -6204,7 +6206,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
 	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
@@ -6252,7 +6253,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
 	.align_mask = 0xff,
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
@@ -6288,7 +6288,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
 	.align_mask = 0xff,
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
@@ -2005,6 +2005,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
 				+ (ring_id * GFX9_MEC_HPD_SIZE);
+	ring->vm_hub = AMDGPU_GFXHUB_0;
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -2104,6 +2105,7 @@ static int gfx_v9_0_sw_init(void *handle)
 
 		/* disable scheduler on the real ring */
 		ring->no_scheduler = true;
+		ring->vm_hub = AMDGPU_GFXHUB_0;
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
 				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -2121,6 +2123,7 @@ static int gfx_v9_0_sw_init(void *handle)
 			ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
 			ring->is_sw_ring = true;
 			hw_prio = amdgpu_sw_ring_priority(i);
+			ring->vm_hub = AMDGPU_GFXHUB_0;
 			r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
 					     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
 					     NULL);
@@ -6790,7 +6793,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
 	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
@@ -6844,7 +6846,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
 	.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
 	.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
@@ -6898,7 +6899,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.align_mask = 0xff,
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -6937,7 +6937,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
 	.align_mask = 0xff,
 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -479,8 +479,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					     unsigned vmid, uint64_t pd_addr)
 {
-	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
@@ -534,7 +534,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
 	if (ring->is_mes_queue)
 		return;
 
-	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+	if (ring->vm_hub == AMDGPU_GFXHUB_0)
 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
 	else
 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -378,8 +378,8 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					     unsigned vmid, uint64_t pd_addr)
 {
-	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
@@ -433,7 +433,7 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
 	if (ring->is_mes_queue)
 		return;
 
-	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+	if (ring->vm_hub == AMDGPU_GFXHUB_0)
 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
 	else
 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
@@ -1007,9 +1007,9 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					    unsigned vmid, uint64_t pd_addr)
 {
-	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
+	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
@@ -1060,10 +1060,10 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 	uint32_t reg;
 
 	/* Do nothing because there's no lut register for mmhub1. */
-	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
+	if (ring->vm_hub == AMDGPU_MMHUB_1)
 		return;
 
-	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+	if (ring->vm_hub == AMDGPU_GFXHUB_0)
 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
 	else
 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -376,7 +376,7 @@ static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring,
 static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring,
 						unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t data0, data1, mask;
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -485,6 +485,7 @@ int jpeg_v1_0_sw_init(void *handle)
 		return r;
 
 	ring = &adev->jpeg.inst->ring_dec;
+	ring->vm_hub = AMDGPU_MMHUB_0;
 	sprintf(ring->name, "jpeg_dec");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
 			     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -548,7 +549,6 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
 	.nop = PACKET0(0x81ff, 0),
 	.support_64bit_ptrs = false,
 	.no_user_fence = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.extra_dw = 64,
 	.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
 	.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
@@ -86,6 +86,7 @@ static int jpeg_v2_0_sw_init(void *handle)
 	ring = &adev->jpeg.inst->ring_dec;
 	ring->use_doorbell = true;
 	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+	ring->vm_hub = AMDGPU_MMHUB_0;
 	sprintf(ring->name, "jpeg_dec");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
 			     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -613,7 +614,7 @@ void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				      unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t data0, data1, mask;
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -762,7 +763,6 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
 static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_JPEG,
 	.align_mask = 0xf,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
 	.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
 	.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
@@ -127,6 +127,10 @@ static int jpeg_v2_5_sw_init(void *handle)
 
 		ring = &adev->jpeg.inst[i].ring_dec;
 		ring->use_doorbell = true;
+		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+			ring->vm_hub = AMDGPU_MMHUB_1;
+		else
+			ring->vm_hub = AMDGPU_MMHUB_0;
 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
 		sprintf(ring->name, "jpeg_dec_%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
@@ -645,7 +649,6 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
 static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_JPEG,
 	.align_mask = 0xf,
-	.vmhub = AMDGPU_MMHUB_1,
 	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
 	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
 	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
@@ -675,7 +678,6 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
 static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_JPEG,
 	.align_mask = 0xf,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
 	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
 	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
@@ -100,6 +100,7 @@ static int jpeg_v3_0_sw_init(void *handle)
 	ring = &adev->jpeg.inst->ring_dec;
 	ring->use_doorbell = true;
 	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+	ring->vm_hub = AMDGPU_MMHUB_0;
 	sprintf(ring->name, "jpeg_dec");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -559,7 +560,6 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
 static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_JPEG,
 	.align_mask = 0xf,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
 	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
 	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
@@ -108,6 +108,7 @@ static int jpeg_v4_0_sw_init(void *handle)
 	ring = &adev->jpeg.inst->ring_dec;
 	ring->use_doorbell = true;
 	ring->doorbell_index = amdgpu_sriov_vf(adev) ? (((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
+	ring->vm_hub = AMDGPU_MMHUB_0;
 
 	sprintf(ring->name, "jpeg_dec");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -715,7 +716,6 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
 static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_JPEG,
 	.align_mask = 0xf,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
 	.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
 	.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
@@ -1823,6 +1823,15 @@ static int sdma_v4_0_sw_init(void *handle)
 		/* doorbell size is 2 dwords, get DWORD offset */
 		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
 
+		/*
+		 * On Arcturus, SDMA instance 5~7 has a different vmhub
+		 * type(AMDGPU_MMHUB_1).
+		 */
+		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+			ring->vm_hub = AMDGPU_MMHUB_1;
+		else
+			ring->vm_hub = AMDGPU_MMHUB_0;
+
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
 				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
@@ -1841,6 +1850,11 @@ static int sdma_v4_0_sw_init(void *handle)
 			ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
 			ring->doorbell_index += 0x400;
 
+			if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+				ring->vm_hub = AMDGPU_MMHUB_1;
+			else
+				ring->vm_hub = AMDGPU_MMHUB_0;
+
 			sprintf(ring->name, "page%d", i);
 			r = amdgpu_ring_init(adev, ring, 1024,
 					     &adev->sdma.trap_irq,
@@ -2294,44 +2308,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
-	.get_rptr = sdma_v4_0_ring_get_rptr,
-	.get_wptr = sdma_v4_0_ring_get_wptr,
-	.set_wptr = sdma_v4_0_ring_set_wptr,
-	.emit_frame_size =
-		6 + /* sdma_v4_0_ring_emit_hdp_flush */
-		3 + /* hdp invalidate */
-		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
-		/* sdma_v4_0_ring_emit_vm_flush */
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
-		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
-	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
-	.emit_ib = sdma_v4_0_ring_emit_ib,
-	.emit_fence = sdma_v4_0_ring_emit_fence,
-	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
-	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
-	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
-	.test_ring = sdma_v4_0_ring_test_ring,
-	.test_ib = sdma_v4_0_ring_test_ib,
-	.insert_nop = sdma_v4_0_ring_insert_nop,
-	.pad_ib = sdma_v4_0_ring_pad_ib,
-	.emit_wreg = sdma_v4_0_ring_emit_wreg,
-	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
-/*
- * On Arcturus, SDMA instance 5~7 has a different vmhub type(AMDGPU_MMHUB_1).
- * So create a individual constant ring_funcs for those instances.
- */
-static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
-	.type = AMDGPU_RING_TYPE_SDMA,
-	.align_mask = 0xf,
-	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
-	.support_64bit_ptrs = true,
-	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_1,
 	.get_rptr = sdma_v4_0_ring_get_rptr,
 	.get_wptr = sdma_v4_0_ring_get_wptr,
 	.set_wptr = sdma_v4_0_ring_set_wptr,
@@ -2364,40 +2340,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
-	.get_rptr = sdma_v4_0_ring_get_rptr,
-	.get_wptr = sdma_v4_0_page_ring_get_wptr,
-	.set_wptr = sdma_v4_0_page_ring_set_wptr,
-	.emit_frame_size =
-		6 + /* sdma_v4_0_ring_emit_hdp_flush */
-		3 + /* hdp invalidate */
-		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
-		/* sdma_v4_0_ring_emit_vm_flush */
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
-		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
-	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
-	.emit_ib = sdma_v4_0_ring_emit_ib,
-	.emit_fence = sdma_v4_0_ring_emit_fence,
-	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
-	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
-	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
-	.test_ring = sdma_v4_0_ring_test_ring,
-	.test_ib = sdma_v4_0_ring_test_ib,
-	.insert_nop = sdma_v4_0_ring_insert_nop,
-	.pad_ib = sdma_v4_0_ring_pad_ib,
-	.emit_wreg = sdma_v4_0_ring_emit_wreg,
-	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
-static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
-	.type = AMDGPU_RING_TYPE_SDMA,
-	.align_mask = 0xf,
-	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
-	.support_64bit_ptrs = true,
-	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_1,
 	.get_rptr = sdma_v4_0_ring_get_rptr,
 	.get_wptr = sdma_v4_0_page_ring_get_wptr,
 	.set_wptr = sdma_v4_0_page_ring_set_wptr,
@@ -2429,19 +2371,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
-			adev->sdma.instance[i].ring.funcs =
-					&sdma_v4_0_ring_funcs_2nd_mmhub;
-		else
-			adev->sdma.instance[i].ring.funcs =
-					&sdma_v4_0_ring_funcs;
+		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
 		adev->sdma.instance[i].ring.me = i;
 		if (adev->sdma.has_page_queue) {
-			if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
-				adev->sdma.instance[i].page.funcs =
-						&sdma_v4_0_page_ring_funcs_2nd_mmhub;
-			else
-				adev->sdma.instance[i].page.funcs =
-						&sdma_v4_0_page_ring_funcs;
+			adev->sdma.instance[i].page.funcs =
+					&sdma_v4_0_page_ring_funcs;
 			adev->sdma.instance[i].page.me = i;
 		}
@@ -1309,6 +1309,7 @@ static int sdma_v4_4_2_sw_init(void *handle)
 
 		/* doorbell size is 2 dwords, get DWORD offset */
 		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+		ring->vm_hub = AMDGPU_MMHUB_0;
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
@@ -1327,6 +1328,7 @@ static int sdma_v4_4_2_sw_init(void *handle)
 			 */
 			ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
 			ring->doorbell_index += 0x400;
+			ring->vm_hub = AMDGPU_MMHUB_0;
 
 			sprintf(ring->name, "page%d", i);
 			r = amdgpu_ring_init(adev, ring, 1024,
@@ -1741,7 +1743,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
 	.align_mask = 0xf,
 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = sdma_v4_4_2_ring_get_rptr,
 	.get_wptr = sdma_v4_4_2_ring_get_wptr,
 	.set_wptr = sdma_v4_4_2_ring_set_wptr,
@@ -1773,7 +1774,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
 	.align_mask = 0xf,
 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.support_64bit_ptrs = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = sdma_v4_4_2_ring_get_rptr,
 	.get_wptr = sdma_v4_4_2_page_ring_get_wptr,
 	.set_wptr = sdma_v4_4_2_page_ring_set_wptr,
@@ -1389,6 +1389,7 @@ static int sdma_v5_0_sw_init(void *handle)
 			(adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
 			: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
 
+		ring->vm_hub = AMDGPU_GFXHUB_0;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
 				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
@@ -1765,7 +1766,6 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = sdma_v5_0_ring_get_rptr,
 	.get_wptr = sdma_v5_0_ring_get_wptr,
 	.set_wptr = sdma_v5_0_ring_set_wptr,
@@ -1253,6 +1253,7 @@ static int sdma_v5_2_sw_init(void *handle)
 		ring->doorbell_index =
 			(adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
 
+		ring->vm_hub = AMDGPU_GFXHUB_0;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
 				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
@@ -1653,7 +1654,6 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = sdma_v5_2_ring_get_rptr,
 	.get_wptr = sdma_v5_2_ring_get_wptr,
 	.set_wptr = sdma_v5_2_ring_set_wptr,
@@ -1181,7 +1181,7 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					 unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
 
 	/* Update the PD address for this VMID. */
@@ -1301,6 +1301,7 @@ static int sdma_v6_0_sw_init(void *handle)
 		ring->doorbell_index =
 			(adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset
 
+		ring->vm_hub = AMDGPU_GFXHUB_0;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
 				     &adev->sdma.trap_irq,
@@ -1557,7 +1558,6 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.support_64bit_ptrs = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_GFXHUB_0,
 	.get_rptr = sdma_v6_0_ring_get_rptr,
 	.get_wptr = sdma_v6_0_ring_get_wptr,
 	.set_wptr = sdma_v6_0_ring_set_wptr,
@@ -444,6 +444,7 @@ static int uvd_v7_0_sw_init(void *handle)
 			continue;
 		if (!amdgpu_sriov_vf(adev)) {
 			ring = &adev->uvd.inst[j].ring;
+			ring->vm_hub = AMDGPU_MMHUB_0;
 			sprintf(ring->name, "uvd_%d", ring->me);
 			r = amdgpu_ring_init(adev, ring, 512,
 					     &adev->uvd.inst[j].irq, 0,
@@ -454,6 +455,7 @@ static int uvd_v7_0_sw_init(void *handle)
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst[j].ring_enc[i];
+			ring->vm_hub = AMDGPU_MMHUB_0;
 			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
 			if (amdgpu_sriov_vf(adev)) {
 				ring->use_doorbell = true;
@@ -1397,7 +1399,7 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t data0, data1, mask;
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1440,7 +1442,7 @@ static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned int vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
@@ -1802,7 +1804,6 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.align_mask = 0xf,
 	.support_64bit_ptrs = false,
 	.no_user_fence = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = uvd_v7_0_ring_get_rptr,
 	.get_wptr = uvd_v7_0_ring_get_wptr,
 	.set_wptr = uvd_v7_0_ring_set_wptr,
@@ -1835,7 +1836,6 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
 	.nop = HEVC_ENC_CMD_NO_OP,
 	.support_64bit_ptrs = false,
 	.no_user_fence = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
 	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
 	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
@@ -466,6 +466,7 @@ static int vce_v4_0_sw_init(void *handle)
 		enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
 
 		ring = &adev->vce.ring[i];
+		ring->vm_hub = AMDGPU_MMHUB_0;
 		sprintf(ring->name, "vce%d", i);
 		if (amdgpu_sriov_vf(adev)) {
 			/* DOORBELL only works under SRIOV */
@@ -1021,7 +1022,7 @@ static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
 				   unsigned int vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
@@ -1103,7 +1104,6 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
 	.nop = VCE_CMD_NO_OP,
 	.support_64bit_ptrs = false,
 	.no_user_fence = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vce_v4_0_ring_get_rptr,
 	.get_wptr = vce_v4_0_ring_get_wptr,
 	.set_wptr = vce_v4_0_ring_set_wptr,
@@ -65,7 +65,7 @@ void vcn_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 void vcn_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				   uint32_t vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t data0, data1, mask;
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -120,6 +120,7 @@ static int vcn_v1_0_sw_init(void *handle)
 		return r;
 
 	ring = &adev->vcn.inst->ring_dec;
+	ring->vm_hub = AMDGPU_MMHUB_0;
 	sprintf(ring->name, "vcn_dec");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -141,6 +142,7 @@ static int vcn_v1_0_sw_init(void *handle)
 		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
 
 		ring = &adev->vcn.inst->ring_enc[i];
+		ring->vm_hub = AMDGPU_MMHUB_0;
 		sprintf(ring->name, "vcn_enc%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
 				     hw_prio, NULL);
@@ -1548,7 +1550,7 @@ static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t data0, data1, mask;
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1693,7 +1695,7 @@ static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned int vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
@@ -1977,7 +1979,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.support_64bit_ptrs = false,
 	.no_user_fence = true,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
@@ -2012,7 +2013,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
 	.nop = VCN_ENC_CMD_NO_OP,
 	.support_64bit_ptrs = false,
 	.no_user_fence = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
 	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
 	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
@@ -129,6 +129,7 @@ static int vcn_v2_0_sw_init(void *handle)
 
 	ring->use_doorbell = true;
 	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
+	ring->vm_hub = AMDGPU_MMHUB_0;
 
 	sprintf(ring->name, "vcn_dec");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
@@ -159,6 +160,7 @@ static int vcn_v2_0_sw_init(void *handle)
 
 		ring = &adev->vcn.inst->ring_enc[i];
 		ring->use_doorbell = true;
+		ring->vm_hub = AMDGPU_MMHUB_0;
 		if (!amdgpu_sriov_vf(adev))
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
 		else
@@ -1511,7 +1513,7 @@ void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				     unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t data0, data1, mask;
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1671,7 +1673,7 @@ void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				     unsigned int vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
@@ -2014,7 +2016,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
@@ -2045,7 +2046,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
 	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
 	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
@@ -186,6 +186,12 @@ static int vcn_v2_5_sw_init(void *handle)
 
 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
 				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
+
+		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+			ring->vm_hub = AMDGPU_MMHUB_1;
+		else
+			ring->vm_hub = AMDGPU_MMHUB_0;
+
 		sprintf(ring->name, "vcn_dec_%d", j);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
 				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -201,6 +207,11 @@ static int vcn_v2_5_sw_init(void *handle)
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
 					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
 
+			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+				ring->vm_hub = AMDGPU_MMHUB_1;
+			else
+				ring->vm_hub = AMDGPU_MMHUB_0;
+
 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
 			r = amdgpu_ring_init(adev, ring, 512,
 					     &adev->vcn.inst[j].irq, 0,
@@ -1562,38 +1573,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_1,
-	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
-	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
-	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
-	.emit_frame_size =
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
-		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
-		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
-		6,
-	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
-	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
-	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-	.test_ring = vcn_v2_0_dec_ring_test_ring,
-	.test_ib = amdgpu_vcn_dec_ring_test_ib,
-	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
-	.insert_start = vcn_v2_0_dec_ring_insert_start,
-	.insert_end = vcn_v2_0_dec_ring_insert_end,
-	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
-static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
-	.type = AMDGPU_RING_TYPE_VCN_DEC,
-	.align_mask = 0xf,
-	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
 	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
 	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
@@ -1693,7 +1672,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_1,
 	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
 	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
 	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
@@ -1719,36 +1697,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
-	.type = AMDGPU_RING_TYPE_VCN_ENC,
-	.align_mask = 0x3f,
-	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_0,
-	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
-	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
-	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
-	.emit_frame_size =
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
-		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
-		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
-		1, /* vcn_v2_0_enc_ring_insert_end */
-	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
-	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
-	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_enc_ring_test_ring,
-	.test_ib = amdgpu_vcn_enc_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
-	.insert_end = vcn_v2_0_enc_ring_insert_end,
-	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -1756,10 +1704,7 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
-			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
-		else /* CHIP_ALDEBARAN */
-			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
+		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
 		adev->vcn.inst[i].ring_dec.me = i;
 		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
 	}
@@ -1773,10 +1718,7 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
-				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
-			else /* CHIP_ALDEBARAN */
-				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
+			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
 			adev->vcn.inst[j].ring_enc[i].me = j;
 		}
 		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
@@ -189,6 +189,7 @@ static int vcn_v3_0_sw_init(void *handle)
 		} else {
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
 		}
+		ring->vm_hub = AMDGPU_MMHUB_0;
 		sprintf(ring->name, "vcn_dec_%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
 				     AMDGPU_RING_PRIO_DEFAULT,
@@ -212,6 +213,7 @@ static int vcn_v3_0_sw_init(void *handle)
 			} else {
 				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
 			}
+			ring->vm_hub = AMDGPU_MMHUB_0;
 			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
 			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
 					     hw_prio, &adev->vcn.inst[i].sched_score);
@@ -1738,7 +1740,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
 	.align_mask = 0x3f,
 	.nop = VCN_DEC_SW_CMD_NO_OP,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
@@ -1899,7 +1900,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
@@ -2000,7 +2000,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
 	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
 	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
@@ -149,7 +149,7 @@ static int vcn_v4_0_sw_init(void *handle)
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
 		else
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
-
+		ring->vm_hub = AMDGPU_MMHUB_0;
 		sprintf(ring->name, "vcn_unified_%d", i);
 
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
@@ -1798,7 +1798,6 @@ static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
 	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
 	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
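On the consumer side the change is mechanical: everything that indexed per-hub state through `ring->funcs->vmhub` now reads `ring->vm_hub`. A hedged sketch of that lookup pattern follows, again with simplified stand-in types rather than the driver's real `amdgpu_vmhub`/`amdgpu_device`/`amdgpu_ring` definitions; it mirrors the shape of the `gmc_vX_0_emit_flush_gpu_tlb()` hunks above.

```c
/* Simplified stand-ins for the driver structures. */
struct vmhub_state { unsigned inv_eng; };

struct device_state {
	struct vmhub_state vmhub[3];	/* indexed by hub id */
};

struct ring_state {
	struct device_state *adev;
	unsigned vm_hub;	/* was ring->funcs->vmhub before this patch */
};

/* Pick the hub's state through the ring's own vm_hub field,
 * as gmc_v9_0_emit_flush_gpu_tlb() now does. */
unsigned pick_inv_eng(const struct ring_state *ring)
{
	const struct vmhub_state *hub = &ring->adev->vmhub[ring->vm_hub];

	return hub->inv_eng;
}
```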