drm/amdgpu: Enable VCN for Beige Goby
Enabled VCN support for the Beige Goby chip.

Signed-off-by: Veerabadhran Gopalakrishnan <veerabadhran.gopalakrishnan@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit f703d4b6f2
parent 3df8ecc8a1
@@ -48,6 +48,7 @@
 #define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
 #define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
 #define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
+#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -63,6 +64,7 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
 MODULE_FIRMWARE(FIRMWARE_VANGOGH);
 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
 
 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
@@ -151,6 +153,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 			adev->vcn.indirect_sram = true;
 		break;
+	case CHIP_BEIGE_GOBY:
+		fw_name = FIRMWARE_BEIGE_GOBY;
+		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+			adev->vcn.indirect_sram = true;
+		break;
 	default:
 		return -EINVAL;
 	}
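The amdgpu_vcn_sw_init() hunk above follows the same pattern as the existing ASICs: pick the VCN firmware file for the chip, then enable indirect SRAM only when firmware is loaded through PSP and VCN dynamic power gating (DPG) is advertised. The following standalone C sketch illustrates that selection logic outside the kernel; the enum, the boolean flags, and the printf harness are illustrative stand-ins, not the driver's types or constants.

#include <stdbool.h>
#include <stdio.h>

enum chip_type { CHIP_SOMETHING_ELSE, CHIP_BEIGE_GOBY };

int main(void)
{
	enum chip_type asic_type = CHIP_BEIGE_GOBY;
	bool fw_load_psp = true;   /* stand-in for AMDGPU_FW_LOAD_PSP */
	bool pg_vcn_dpg = true;    /* stand-in for AMD_PG_SUPPORT_VCN_DPG */
	const char *fw_name = NULL;
	bool indirect_sram = false;

	switch (asic_type) {
	case CHIP_BEIGE_GOBY:
		fw_name = "amdgpu/beige_goby_vcn.bin";
		if (fw_load_psp && pg_vcn_dpg)
			indirect_sram = true;
		break;
	default:
		return 1;   /* mirrors the driver's -EINVAL default case */
	}

	printf("firmware: %s, indirect_sram: %s\n",
	       fw_name, indirect_sram ? "true" : "false");
	return 0;
}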
@@ -967,6 +967,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
 		    is_support_sw_smu(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
 		break;
 	default:
 		return -EINVAL;
@@ -1277,7 +1278,8 @@ static int nv_common_early_init(void *handle)
 		break;
 	case CHIP_BEIGE_GOBY:
 		adev->cg_flags = 0;
-		adev->pg_flags = 0;
+		adev->pg_flags = AMD_PG_SUPPORT_VCN |
+			AMD_PG_SUPPORT_VCN_DPG;
 		adev->external_rev_id = adev->rev_id + 0x46;
 		break;
 	default:
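The nv_set_ip_blocks() hunk registers the VCN 3.0 IP block for Beige Goby, and in nv_common_early_init() pg_flags changes from 0 to AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG, i.e. the chip now advertises VCN power gating and VCN dynamic power gating as OR-ed bits in a capability mask that other code (such as amdgpu_vcn_sw_init() above) tests with a bitwise AND. A minimal standalone sketch of that flag pattern, using made-up bit values rather than the kernel's constants:

#include <stdio.h>

/* Illustrative stand-ins for AMD_PG_SUPPORT_VCN / AMD_PG_SUPPORT_VCN_DPG. */
#define PG_SUPPORT_VCN      (1u << 0)
#define PG_SUPPORT_VCN_DPG  (1u << 1)

int main(void)
{
	/* Advertise both capabilities, as the Beige Goby hunk now does. */
	unsigned int pg_flags = PG_SUPPORT_VCN | PG_SUPPORT_VCN_DPG;

	if (pg_flags & PG_SUPPORT_VCN)
		printf("VCN power gating is advertised\n");
	if (pg_flags & PG_SUPPORT_VCN_DPG)
		printf("VCN dynamic power gating is advertised\n");
	return 0;
}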
@@ -91,6 +91,11 @@ static int vcn_v3_0_early_init(void *handle)
 		adev->vcn.harvest_config = 0;
 		adev->vcn.num_enc_rings = 1;
 
+		if (adev->asic_type == CHIP_BEIGE_GOBY) {
+			adev->vcn.num_vcn_inst = 1;
+			adev->vcn.num_enc_rings = 0;
+		}
+
 	} else {
 		if (adev->asic_type == CHIP_SIENNA_CICHLID) {
 			u32 harvest;
@@ -110,7 +115,10 @@ static int vcn_v3_0_early_init(void *handle)
 		} else
 			adev->vcn.num_vcn_inst = 1;
 
-		adev->vcn.num_enc_rings = 2;
+		if (adev->asic_type == CHIP_BEIGE_GOBY)
+			adev->vcn.num_enc_rings = 0;
+		else
+			adev->vcn.num_enc_rings = 2;
 	}
 
 	vcn_v3_0_set_dec_ring_funcs(adev);
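vcn_v3_0_early_init() leaves Beige Goby with one VCN instance and zero encode rings, so every later loop over adev->vcn.num_enc_rings simply runs zero times and no encode rings are created or programmed. A small standalone sketch of that effect; the helper and the counts are illustrative, not driver code:

#include <stdio.h>

static void init_enc_ring(int inst, int ring)
{
	printf("initializing encode ring %d on VCN instance %d\n", ring, inst);
}

int main(void)
{
	int num_vcn_inst = 1;
	int num_enc_rings = 0;   /* Beige Goby: decode only, no encode rings */

	for (int i = 0; i < num_vcn_inst; i++)
		for (int j = 0; j < num_enc_rings; j++)
			init_enc_ring(i, j);

	printf("%d encode ring(s) initialized\n", num_vcn_inst * num_enc_rings);
	return 0;
}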
@@ -1261,23 +1269,25 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
 		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
 		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
 
-		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-		ring = &adev->vcn.inst[i].ring_enc[0];
-		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
-		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
-		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+		if (adev->asic_type != CHIP_BEIGE_GOBY) {
+			fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+			ring = &adev->vcn.inst[i].ring_enc[0];
+			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+			fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
 
-		fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-		ring = &adev->vcn.inst[i].ring_enc[1];
-		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
-		fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+			fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+			ring = &adev->vcn.inst[i].ring_enc[1];
+			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+			fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+		}
 	}
 
 	return 0;
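The vcn_v3_0_start() change wraps the encode-ring programming in an asic_type check so it is skipped entirely on Beige Goby. The code it guards follows a set/program/clear handshake: flag the queue as in reset in the firmware-shared area, write the ring registers, then clear the flag. Below is a standalone sketch of that handshake; the struct, the write_reg() helper, and the bit value are stand-ins, not the real WREG32_SOC15() interface.

#include <stdint.h>
#include <stdio.h>

#define FW_QUEUE_RING_RESET (1u << 0)   /* illustrative bit value */

/* Stand-in for the firmware-shared queue-mode field. */
struct fw_shared_area { uint32_t enc_gp_queue_mode; };

/* Stand-in register writer; the driver uses WREG32_SOC15() instead. */
static void write_reg(const char *name, uint32_t val)
{
	printf("%-16s <= 0x%08x\n", name, val);
}

int main(void)
{
	struct fw_shared_area fw = { 0 };
	uint64_t ring_gpu_addr = 0x1000000;
	uint32_t ring_size_bytes = 4096;

	fw.enc_gp_queue_mode |= FW_QUEUE_RING_RESET;   /* ring is being reset */
	write_reg("UVD_RB_RPTR", 0);
	write_reg("UVD_RB_WPTR", 0);
	write_reg("UVD_RB_BASE_LO", (uint32_t)ring_gpu_addr);
	write_reg("UVD_RB_BASE_HI", (uint32_t)(ring_gpu_addr >> 32));
	write_reg("UVD_RB_SIZE", ring_size_bytes / 4);
	fw.enc_gp_queue_mode &= ~FW_QUEUE_RING_RESET;  /* ring is valid again */

	return 0;
}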
@@ -1657,31 +1667,33 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 				UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
 				~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
-			/* Restore */
-			fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
-			fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-			ring = &adev->vcn.inst[inst_idx].ring_enc[0];
-			ring->wptr = 0;
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-			fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+			if (adev->asic_type != CHIP_BEIGE_GOBY) {
+				/* Restore */
+				fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+				ring->wptr = 0;
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
 
-			fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-			ring = &adev->vcn.inst[inst_idx].ring_enc[1];
-			ring->wptr = 0;
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-			fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+				fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+				ring->wptr = 0;
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+				fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
 
-			/* restore wptr/rptr with pointers saved in FW shared memory*/
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+				/* restore wptr/rptr with pointers saved in FW shared memory*/
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+			}
 
 			/* Unstall DPG */
 			WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
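The vcn_v3_0_pause_dpg_mode() hunk applies the same gating: on an ASIC with no encode rings the entire encode-ring restore is skipped, while the common "Unstall DPG" step after the guard still runs for every chip. A minimal control-flow sketch of that structure; the function names here are illustrative stand-ins, not driver functions:

#include <stdbool.h>
#include <stdio.h>

static void restore_encode_rings(void) { printf("restore encode rings\n"); }
static void unstall_dpg(void)          { printf("unstall DPG\n"); }

int main(void)
{
	bool has_encode_rings = false;   /* Beige Goby: no encode rings */

	if (has_encode_rings)
		restore_encode_rings();

	/* Common path: always executed, matching the code outside the guard. */
	unstall_dpg();
	return 0;
}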
@@ -2138,7 +2150,8 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
 			adev->vcn.inst[i].ring_enc[j].me = i;
 		}
-		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
+		if (adev->vcn.num_enc_rings > 0)
+			DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
 	}
 }
 
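Finally, the vcn_v3_0_set_enc_ring_funcs() hunk only prints the "encode is enabled" message when the instance actually has encode rings, so the Beige Goby log no longer claims encode support. A trivial standalone sketch of that guard, with illustrative counts:

#include <stdio.h>

int main(void)
{
	int num_vcn_inst = 1;
	int num_enc_rings = 0;   /* Beige Goby: no encode rings */

	for (int i = 0; i < num_vcn_inst; i++)
		if (num_enc_rings > 0)
			printf("VCN(%d) encode is enabled in VM mode\n", i);
	return 0;
}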