Merge branch 'drm-next-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-next
Main changes for 4.18. I'd like to do a separate pull for vega20 later this
week or next.

Highlights:
- Reserve pre-OS scanout buffer during init for seamless transition from
  console to driver
- VEGAM support
- Improved GPU scheduler documentation
- Initial gfxoff support for raven
- SR-IOV fixes
- Default to non-AGP on PowerPC for radeon
- Fine grained clock voltage control for vega10
- Power profiles for vega10
- Further clean up of powerplay/driver interface
- Underlay fixes
- Display link bw updates
- Gamma fixes
- Scatter/Gather display support on CZ/ST
- Misc bug fixes and clean ups

[airlied: fixup v3d vs scheduler API change]

Link: https://patchwork.freedesktop.org/patch/msgid/20180515185450.1113-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit 95d2c3e15d
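A pattern that recurs throughout the diff below is the conversion of amdgpu_bo_create() from a long positional argument list to a caller-filled struct amdgpu_bo_param. A minimal before/after sketch, assembled from the hunks below (the field names are exactly those used in the hunks; this is illustrative, not an authoritative reference for the final API):

    /* Before: eight positional arguments in a fixed order. */
    r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                         AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
                         NULL, &bo);

    /* After: zero the parameter block, fill in what is needed, pass it once. */
    struct amdgpu_bo_param bp;

    memset(&bp, 0, sizeof(bp));
    bp.size = size;                            /* allocation size in bytes */
    bp.byte_align = PAGE_SIZE;                 /* placement alignment */
    bp.domain = AMDGPU_GEM_DOMAIN_GTT;         /* initial placement domain */
    bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; /* creation flags */
    bp.type = ttm_bo_type_kernel;              /* kernel-internal BO */
    bp.resv = NULL;                            /* no shared reservation object */
    r = amdgpu_bo_create(adev, &bp, &bo);

The struct form lets call sites set only the fields they care about and allows new parameters to be added later without touching every caller.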
drivers/gpu/drm/amd/amdgpu/Makefile
@@ -64,6 +64,10 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
 amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
 
+# add DF block
+amdgpu-y += \
+	df_v1_7.o
+
 # add GMC block
 amdgpu-y += \
 	gmc_v7_0.o \
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -129,6 +129,7 @@ extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
+extern uint amdgpu_smu_memory_pool_size;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -137,6 +138,7 @@ extern int amdgpu_si_support;
 extern int amdgpu_cik_support;
 #endif
 
+#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -222,10 +224,10 @@ enum amdgpu_kiq_irq {
 	AMDGPU_CP_KIQ_IRQ_LAST
 };
 
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state);
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state);
 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
@@ -681,6 +683,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
 
@@ -771,9 +775,18 @@ struct amdgpu_rlc {
 	u32 starting_offsets_start;
 	u32 reg_list_format_size_bytes;
 	u32 reg_list_size_bytes;
+	u32 reg_list_format_direct_reg_list_length;
+	u32 save_restore_list_cntl_size_bytes;
+	u32 save_restore_list_gpm_size_bytes;
+	u32 save_restore_list_srm_size_bytes;
 
 	u32 *register_list_format;
 	u32 *register_restore;
+	u8 *save_restore_list_cntl;
+	u8 *save_restore_list_gpm;
+	u8 *save_restore_list_srm;
+
+	bool is_rlc_v2_1;
 };
 
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@@ -867,6 +880,8 @@ struct amdgpu_gfx_config {
 
 	/* gfx configure feature */
 	uint32_t double_offchip_lds_buf;
+	/* cached value of DB_DEBUG2 */
+	uint32_t db_debug2;
 };
 
 struct amdgpu_cu_info {
@@ -938,6 +953,12 @@ struct amdgpu_gfx {
 	uint32_t			ce_feature_version;
 	uint32_t			pfp_feature_version;
 	uint32_t			rlc_feature_version;
+	uint32_t			rlc_srlc_fw_version;
+	uint32_t			rlc_srlc_feature_version;
+	uint32_t			rlc_srlg_fw_version;
+	uint32_t			rlc_srlg_feature_version;
+	uint32_t			rlc_srls_fw_version;
+	uint32_t			rlc_srls_feature_version;
 	uint32_t			mec_feature_version;
 	uint32_t			mec2_feature_version;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
@@ -1204,6 +1225,8 @@ struct amdgpu_asic_funcs {
 	/* invalidate hdp read cache */
 	void (*invalidate_hdp)(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring);
+	/* check if the asic needs a full reset of if soft reset will work */
+	bool (*need_full_reset)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1368,7 +1391,17 @@ struct amdgpu_nbio_funcs {
 	void (*detect_hw_virt)(struct amdgpu_device *adev);
 };
 
+struct amdgpu_df_funcs {
+	void (*init)(struct amdgpu_device *adev);
+	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+				      bool enable);
+	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+						 bool enable);
+	void (*get_clockgating_state)(struct amdgpu_device *adev,
+				      u32 *flags);
+};
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {
 	GC_HWIP = 1,
@@ -1398,6 +1431,7 @@ enum amd_hw_ip_block_type {
 struct amd_powerplay {
 	void *pp_handle;
 	const struct amd_pm_funcs *pp_funcs;
+	uint32_t pp_feature;
 };
 
 #define AMDGPU_RESET_MAGIC_NUM 64
@@ -1590,6 +1624,7 @@ struct amdgpu_device {
 	uint32_t			*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
 	const struct amdgpu_nbio_funcs	*nbio_funcs;
+	const struct amdgpu_df_funcs	*df_funcs;
 
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work	late_init_work;
@@ -1764,6 +1799,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
 #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
@@ -1790,6 +1826,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
+#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
 #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -290,12 +290,11 @@ static int acp_hw_init(void *handle)
 	else if (r)
 		return r;
 
-	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
-				 0x5289, 0, &acp_base);
-	if (r == -ENODEV)
-		return 0;
-	else if (r)
-		return r;
+	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+		return -EINVAL;
+
+	acp_base = adev->rmmio_base;
 
 	if (adev->asic_type != CHIP_STONEY) {
 		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
 		if (adev->acp.acp_genpd == NULL)
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,13 +243,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo_param bp;
 	int r;
 	uint64_t gpu_addr_tmp = 0;
 	void *cpu_ptr_tmp = NULL;
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
-			     NULL, &bo);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
+	r = amdgpu_bo_create(adev, &bp, &bo);
 	if (r) {
 		dev_err(adev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1143,6 +1143,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 	uint64_t user_addr = 0;
 	struct amdgpu_bo *bo;
+	struct amdgpu_bo_param bp;
 	int byte_align;
 	u32 domain, alloc_domain;
 	u64 alloc_flags;
@@ -1215,8 +1216,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
 			va, size, domain_string(alloc_domain));
 
-	ret = amdgpu_bo_create(adev, size, byte_align,
-				alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = byte_align;
+	bp.domain = alloc_domain;
+	bp.flags = alloc_flags;
+	bp.type = ttm_bo_type_device;
+	bp.resv = NULL;
+	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret) {
 		pr_debug("Failed to create BO on domain %s. ret %d\n",
 				domain_string(alloc_domain), ret);
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -550,7 +550,7 @@ static int amdgpu_atpx_init(void)
  * look up whether we are the integrated or discrete GPU (all asics).
  * Returns the client id.
  */
-static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev)
 {
 	if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
 		return VGA_SWITCHEROO_IGD;
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -75,13 +75,20 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 {
 	struct amdgpu_bo *dobj = NULL;
 	struct amdgpu_bo *sobj = NULL;
+	struct amdgpu_bo_param bp;
 	uint64_t saddr, daddr;
 	int r, n;
 	int time;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = sdomain;
+	bp.flags = 0;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, sdomain, 0,
-			     ttm_bo_type_kernel, NULL, &sobj);
+	r = amdgpu_bo_create(adev, &bp, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -93,8 +100,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
-			     ttm_bo_type_kernel, NULL, &dobj);
+	bp.domain = ddomain;
+	r = amdgpu_bo_create(adev, &bp, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -23,7 +23,6 @@
  */
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/pci.h>
 #include <drm/drmP.h>
 #include <linux/firmware.h>
 #include <drm/amdgpu_drm.h>
@@ -109,121 +108,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 		WARN(1, "Invalid indirect register space");
 }
 
-static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
-				       enum cgs_resource_type resource_type,
-				       uint64_t size,
-				       uint64_t offset,
-				       uint64_t *resource_base)
-{
-	CGS_FUNC_ADEV;
-
-	if (resource_base == NULL)
-		return -EINVAL;
-
-	switch (resource_type) {
-	case CGS_RESOURCE_TYPE_MMIO:
-		if (adev->rmmio_size == 0)
-			return -ENOENT;
-		if ((offset + size) > adev->rmmio_size)
-			return -EINVAL;
-		*resource_base = adev->rmmio_base;
-		return 0;
-	case CGS_RESOURCE_TYPE_DOORBELL:
-		if (adev->doorbell.size == 0)
-			return -ENOENT;
-		if ((offset + size) > adev->doorbell.size)
-			return -EINVAL;
-		*resource_base = adev->doorbell.base;
-		return 0;
-	case CGS_RESOURCE_TYPE_FB:
-	case CGS_RESOURCE_TYPE_IO:
-	case CGS_RESOURCE_TYPE_ROM:
-	default:
-		return -EINVAL;
-	}
-}
-
-static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
-						  unsigned table, uint16_t *size,
-						  uint8_t *frev, uint8_t *crev)
-{
-	CGS_FUNC_ADEV;
-	uint16_t data_start;
-
-	if (amdgpu_atom_parse_data_header(
-		    adev->mode_info.atom_context, table, size,
-		    frev, crev, &data_start))
-		return (uint8_t*)adev->mode_info.atom_context->bios +
-			data_start;
-
-	return NULL;
-}
-
-static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
-					      uint8_t *frev, uint8_t *crev)
-{
-	CGS_FUNC_ADEV;
-
-	if (amdgpu_atom_parse_cmd_header(
-		    adev->mode_info.atom_context, table,
-		    frev, crev))
-		return 0;
-
-	return -EINVAL;
-}
-
-static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
-					  void *args)
-{
-	CGS_FUNC_ADEV;
-
-	return amdgpu_atom_execute_table(
-		adev->mode_info.atom_context, table, args);
-}
-
-static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
-					    enum amd_ip_block_type block_type,
-					    enum amd_clockgating_state state)
-{
-	CGS_FUNC_ADEV;
-	int i, r = -1;
-
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
-
-		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
-				(void *)adev,
-				state);
-			break;
-		}
-	}
-	return r;
-}
-
-static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
-					    enum amd_ip_block_type block_type,
-					    enum amd_powergating_state state)
-{
-	CGS_FUNC_ADEV;
-	int i, r = -1;
-
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
-
-		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
-				(void *)adev,
-				state);
-			break;
-		}
-	}
-	return r;
-}
-
-
 static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 {
 	CGS_FUNC_ADEV;
@@ -271,18 +155,6 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 	return result;
 }
 
-static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
-{
-	CGS_FUNC_ADEV;
-	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
-		release_firmware(adev->pm.fw);
-		adev->pm.fw = NULL;
-		return 0;
-	}
-	/* cannot release other firmware because they are not created by cgs */
-	return -EINVAL;
-}
-
 static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 					    enum cgs_ucode_id type)
 {
@@ -326,34 +198,6 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 	return fw_version;
 }
 
-static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
-				      bool en)
-{
-	CGS_FUNC_ADEV;
-
-	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
-	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
-		return 0;
-
-	if (en)
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
-	else
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
-
-	return 0;
-}
-
-static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
-				     bool lock)
-{
-	CGS_FUNC_ADEV;
-
-	if (lock)
-		mutex_lock(&adev->grbm_idx_mutex);
-	else
-		mutex_unlock(&adev->grbm_idx_mutex);
-}
-
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -541,6 +385,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		case CHIP_POLARIS12:
 			strcpy(fw_name, "amdgpu/polaris12_smc.bin");
 			break;
+		case CHIP_VEGAM:
+			strcpy(fw_name, "amdgpu/vegam_smc.bin");
+			break;
 		case CHIP_VEGA10:
 			if ((adev->pdev->device == 0x687f) &&
 			    ((adev->pdev->revision == 0xc0) ||
@@ -598,97 +445,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	return 0;
 }
 
-static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
-{
-	CGS_FUNC_ADEV;
-	return amdgpu_sriov_vf(adev);
-}
-
-static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
-					       struct cgs_display_info *info)
-{
-	CGS_FUNC_ADEV;
-	struct cgs_mode_info *mode_info;
-
-	if (info == NULL)
-		return -EINVAL;
-
-	mode_info = info->mode_info;
-	if (mode_info)
-		/* if the displays are off, vblank time is max */
-		mode_info->vblank_time_us = 0xffffffff;
-
-	if (!amdgpu_device_has_dc_support(adev)) {
-		struct amdgpu_crtc *amdgpu_crtc;
-		struct drm_device *ddev = adev->ddev;
-		struct drm_crtc *crtc;
-		uint32_t line_time_us, vblank_lines;
-
-		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-			list_for_each_entry(crtc,
-					    &ddev->mode_config.crtc_list, head) {
-				amdgpu_crtc = to_amdgpu_crtc(crtc);
-				if (crtc->enabled) {
-					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
-					info->display_count++;
-				}
-				if (mode_info != NULL &&
-				    crtc->enabled && amdgpu_crtc->enabled &&
-				    amdgpu_crtc->hw_mode.clock) {
-					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-						       amdgpu_crtc->hw_mode.clock;
-					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
-						       amdgpu_crtc->hw_mode.crtc_vdisplay +
-						       (amdgpu_crtc->v_border * 2);
-					mode_info->vblank_time_us = vblank_lines * line_time_us;
-					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-					/* we have issues with mclk switching with refresh rates
-					 * over 120 hz on the non-DC code.
-					 */
-					if (mode_info->refresh_rate > 120)
-						mode_info->vblank_time_us = 0;
-					mode_info = NULL;
-				}
-			}
-		}
-	} else {
-		info->display_count = adev->pm.pm_display_cfg.num_display;
-		if (mode_info != NULL) {
-			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
-			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
-		}
-	}
-	return 0;
-}
-
-
-static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
-{
-	CGS_FUNC_ADEV;
-
-	adev->pm.dpm_enabled = enabled;
-
-	return 0;
-}
-
 static const struct cgs_ops amdgpu_cgs_ops = {
 	.read_register = amdgpu_cgs_read_register,
 	.write_register = amdgpu_cgs_write_register,
 	.read_ind_register = amdgpu_cgs_read_ind_register,
 	.write_ind_register = amdgpu_cgs_write_ind_register,
-	.get_pci_resource = amdgpu_cgs_get_pci_resource,
-	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
-	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
-	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
 	.get_firmware_info = amdgpu_cgs_get_firmware_info,
-	.rel_firmware = amdgpu_cgs_rel_firmware,
-	.set_powergating_state = amdgpu_cgs_set_powergating_state,
-	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
-	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
-	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
-	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
-	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
-	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
 };
 
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -691,7 +691,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
 					    struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -843,7 +843,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
 					   struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1172,7 +1172,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
 		amdgpu_connector->use_digital = true;
 }
 
-static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
 					   struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1448,7 +1448,7 @@ out:
 	return ret;
 }
 
-static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
 					  struct drm_display_mode *mode)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -382,8 +382,7 @@ retry:
 
 	p->bytes_moved += ctx.bytes_moved;
 	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	    amdgpu_bo_in_cpu_visible_vram(bo))
 		p->bytes_moved_vis += ctx.bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -411,7 +410,6 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 	struct amdgpu_bo_list_entry *candidate = p->evictable;
 	struct amdgpu_bo *bo = candidate->robj;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	u64 initial_bytes_moved, bytes_moved;
 	bool update_bytes_moved_vis;
 	uint32_t other;
 
@@ -435,18 +433,14 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 			continue;
 
 		/* Good we can try to move this BO somewhere else */
-		amdgpu_ttm_placement_from_domain(bo, other);
 		update_bytes_moved_vis =
 			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
-		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+			amdgpu_bo_in_cpu_visible_vram(bo);
+		amdgpu_ttm_placement_from_domain(bo, other);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-			      initial_bytes_moved;
-		p->bytes_moved += bytes_moved;
+		p->bytes_moved += ctx.bytes_moved;
 		if (update_bytes_moved_vis)
-			p->bytes_moved_vis += bytes_moved;
+			p->bytes_moved_vis += ctx.bytes_moved;
 
 		if (unlikely(r))
 			break;
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			continue;
 
 		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, amdgpu_sched_jobs, &ctx->guilty);
+					  rq, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
@@ -111,8 +111,9 @@ failed:
 	return r;
 }
 
-static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct kref *ref)
 {
+	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
 
@@ -125,13 +126,11 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
-	for (i = 0; i < adev->num_rings; i++)
-		drm_sched_entity_fini(&adev->rings[i]->sched,
-				      &ctx->rings[i].entity);
-
 	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
 
 	mutex_destroy(&ctx->lock);
+
+	kfree(ctx);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -170,12 +169,15 @@
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+	u32 i;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
 
-	amdgpu_ctx_fini(ctx);
+	for (i = 0; i < ctx->adev->num_rings; i++)
+		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+			&ctx->rings[i].entity);
 
-	kfree(ctx);
+	amdgpu_ctx_fini(ref);
 }
 
 static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
@@ -419,9 +421,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
 
 	if (other) {
 		signed long r;
-		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait(other, true);
 		if (r < 0) {
-			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+			if (r != -ERESTARTSYS)
+				DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
 			return r;
 		}
 	}
@@ -435,16 +439,62 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 	idr_init(&mgr->ctx_handles);
 }
 
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++)
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+						  &ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+}
+
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++)
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+					&ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+}
+
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id;
 
+	amdgpu_ctx_mgr_entity_cleanup(mgr);
+
 	idp = &mgr->ctx_handles;
 
 	idr_for_each_entry(idp, ctx, id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -28,8 +28,13 @@
 #include <linux/debugfs.h>
 #include "amdgpu.h"
 
-/*
- * Debugfs
+/**
+ * amdgpu_debugfs_add_files - Add simple debugfs entries
+ *
+ * @adev:  Device to attach debugfs entries to
+ * @files: Array of function callbacks that respond to reads
+ * @nfiles: Number of callbacks to register
+ *
  */
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 			     const struct drm_info_list *files,
@@ -64,7 +69,33 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 
 #if defined(CONFIG_DEBUG_FS)
 
+
+/**
+ * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
+ *
+ * @read: True if reading
+ * @f: open file handle
+ * @buf: User buffer to write/read to
+ * @size: Number of bytes to write/read
+ * @pos: Offset to seek to
+ *
+ * This debugfs entry has special meaning on the offset being sought.
+ * Various bits have different meanings:
+ *
+ * Bit 62: Indicates a GRBM bank switch is needed
+ * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
+ *	   zero)
+ * Bits 24..33: The SE or ME selector if needed
+ * Bits 34..43: The SH (or SA) or PIPE selector if needed
+ * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
+ *
+ * Bit 23: Indicates that the PM power gating lock should be held
+ *	   This is necessary to read registers that might be
+ *	   unreliable during a power gating transistion.
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 		char __user *buf, size_t size, loff_t *pos)
 {
@@ -164,19 +195,37 @@ end:
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
+ */
 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
 	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
 }
 
+/**
+ * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
+ */
 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
 	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
 }
 
+
+/**
+ * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -204,6 +253,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write.  This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
@@ -232,6 +293,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -259,6 +332,18 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write.  This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
@@ -287,6 +372,18 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_smc_read - Read from a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -314,6 +411,18 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_smc_write - Write to a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write.  This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
@@ -342,6 +451,20 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_gca_config_read - Read from gfx config data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * This file is used to access configuration data in a somewhat
+ * stable fashion.  The format is a series of DWORDs with the first
+ * indicating which revision it is.  New content is appended to the
+ * end so that older software can still read the data.
+ */
+
 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -418,6 +541,19 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset is treated as the BYTE address of one of the sensors
+ * enumerated in amd/include/kgd_pp_interface.h under the
+ * 'amd_pp_sensors' enumeration.  For instance to read the UVD VCLK
+ * you would use the offset 3 * 4 = 12.
+ */
 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -428,7 +564,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	if (size & 3 || *pos & 0x3)
 		return -EINVAL;
 
-	if (amdgpu_dpm == 0)
+	if (!adev->pm.dpm_enabled)
 		return -EINVAL;
 
 	/* convert offset to sensor number */
@@ -457,6 +593,27 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	return !r ? outsize : r;
 }
 
+/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave that the status data
+ * will be returned for.  The bits are used as follows:
+ *
+ * Bits 0..6: Byte offset into data
+ * Bits 7..14: SE selector
+ * Bits 15..22: SH/SA selector
+ * Bits 23..30: CU/{WGP+SIMD} selector
+ * Bits 31..36: WAVE ID selector
+ * Bits 37..44: SIMD ID selector
+ *
+ * The returned data begins with one DWORD of version information
+ * Followed by WAVE STATUS registers relevant to the GFX IP version
+ * being used.  See gfx_v8_0_read_wave_data() for an example output.
+ */
 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -507,6 +664,28 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/** amdgpu_debugfs_gpr_read - Read wave gprs
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave that the status data
+ * will be returned for.  The bits are used as follows:
+ *
+ * Bits 0..11: Byte offset into data
+ * Bits 12..19: SE selector
+ * Bits 20..27: SH/SA selector
+ * Bits 28..35: CU/{WGP+SIMD} selector
+ * Bits 36..43: WAVE ID selector
+ * Bits 37..44: SIMD ID selector
+ * Bits 52..59: Thread selector
+ * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
+ *
+ * The return data comes from the SGPR or VGPR register bank for
+ * the selected operational unit.
+ */
 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -637,6 +816,12 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_gpr",
 };
 
+/**
+ * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
+ * 				register access.
+ *
+ * @adev: The device to attach the debugfs entries to
+ */
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	struct drm_minor *minor = adev->ddev->primary;
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -83,6 +83,7 @@ static const char *amdgpu_asic_name[] = {
 	"POLARIS10",
 	"POLARIS11",
 	"POLARIS12",
+	"VEGAM",
 	"VEGA10",
 	"VEGA12",
 	"RAVEN",
@@ -690,6 +691,8 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
 {
 	u64 size_af, size_bf;
 
+	mc->gart_size += adev->pm.smu_prv_buffer_size;
+
 	size_af = adev->gmc.mc_mask - mc->vram_end;
 	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
@@ -907,6 +910,46 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 	}
 }
 
+static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+{
+	struct sysinfo si;
+	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+	uint64_t total_memory;
+	uint64_t dram_size_seven_GB = 0x1B8000000;
+	uint64_t dram_size_three_GB = 0xB8000000;
+
+	if (amdgpu_smu_memory_pool_size == 0)
+		return;
+
+	if (!is_os_64) {
+		DRM_WARN("Not 64-bit OS, feature not supported\n");
+		goto def_value;
+	}
+	si_meminfo(&si);
+	total_memory = (uint64_t)si.totalram * si.mem_unit;
+
+	if ((amdgpu_smu_memory_pool_size == 1) ||
+		(amdgpu_smu_memory_pool_size == 2)) {
+		if (total_memory < dram_size_three_GB)
+			goto def_value1;
+	} else if ((amdgpu_smu_memory_pool_size == 4) ||
+		(amdgpu_smu_memory_pool_size == 8)) {
+		if (total_memory < dram_size_seven_GB)
+			goto def_value1;
+	} else {
+		DRM_WARN("Smu memory pool size not supported\n");
+		goto def_value;
+	}
+	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
+
+	return;
+
+def_value1:
+	DRM_WARN("No enough system memory\n");
+def_value:
+	adev->pm.smu_prv_buffer_size = 0;
+}
+
 /**
  * amdgpu_device_check_arguments - validate module params
  *
@@ -948,6 +991,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 		amdgpu_vm_fragment_size = -1;
 	}
 
+	amdgpu_device_check_smu_prv_buffer_size(adev);
+
 	amdgpu_device_check_vm_size(adev);
 
 	amdgpu_device_check_block_size(adev);
@@ -1039,10 +1084,11 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1072,10 +1118,11 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1320,9 +1367,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -1428,9 +1476,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -1499,6 +1548,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		return -EAGAIN;
 	}
 
+	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -1654,6 +1705,10 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 	if (amdgpu_emu_mode == 1)
 		return 0;
 
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -1704,7 +1759,7 @@
 		}
 	}
 
-	mod_delayed_work(system_wq, &adev->late_init_work,
+	queue_delayed_work(system_wq, &adev->late_init_work,
 			msecs_to_jiffies(AMDGPU_RESUME_MS));
 
 	amdgpu_device_fill_reset_magic(adev);
@@ -1850,6 +1905,12 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_request_full_gpu(adev, false);
 
+	/* ungate SMC block powergating */
+	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_SMC,
+						       AMD_CG_STATE_UNGATE);
+
 	/* ungate SMC block first */
 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 						   AMD_CG_STATE_UNGATE);
@@ -2086,14 +2147,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_MULLINS:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
 		return amdgpu_dc != 0;
 #endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -2375,10 +2434,6 @@ fence_driver_init:
 		goto failed;
 	}
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
-
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_init_data_exchange(adev);
 
@@ -2539,7 +2594,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct amdgpu_bo *robj;
 
 		if (amdgpu_crtc->cursor_bo) {
@@ -2551,10 +2606,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 			}
 		}
 
-		if (rfb == NULL || rfb->obj == NULL) {
+		if (fb == NULL || fb->obj[0] == NULL) {
 			continue;
 		}
-		robj = gem_to_amdgpu_bo(rfb->obj);
+		robj = gem_to_amdgpu_bo(fb->obj[0]);
 		/* don't unpin kernel fb objects */
 		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
 			r = amdgpu_bo_reserve(robj, true);
@@ -2640,11 +2695,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	amdgpu_fence_driver_resume(adev);
 
-	if (resume) {
-		r = amdgpu_ib_ring_tests(adev);
-		if (r)
-			DRM_ERROR("ib ring test failed (%d).\n", r);
-	}
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
@@ -2736,6 +2786,9 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return true;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -2792,6 +2845,9 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
 {
 	int i;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -3087,20 +3143,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
+	amdgpu_virt_release_full_gpu(adev, true);
 	if (r)
 		goto error;
 
 	amdgpu_irq_gpu_reset_resume_helper(adev);
 	r = amdgpu_ib_ring_tests(adev);
 
-error:
-	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
 		r = amdgpu_device_handle_vram_lost(adev);
 	}
 
+error:
 
 	return r;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,7 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
 
 static void amdgpu_display_flip_callback(struct dma_fence *f,
@@ -151,8 +152,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_framebuffer *old_amdgpu_fb;
-	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
 	struct amdgpu_bo *new_abo;
@@ -174,15 +173,13 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
 
 	/* schedule unpin of the old buffer */
-	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-	obj = old_amdgpu_fb->obj;
+	obj = crtc->primary->fb->obj[0];
 
 	/* take a reference to the old object */
 	work->old_abo = gem_to_amdgpu_bo(obj);
 	amdgpu_bo_ref(work->old_abo);
 
-	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
-	obj = new_amdgpu_fb->obj;
+	obj = fb->obj[0];
 	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
@@ -192,7 +189,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
+	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
@@ -482,31 +479,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
 	return true;
 }
 
-static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
-	drm_gem_object_put_unlocked(amdgpu_fb->obj);
-	drm_framebuffer_cleanup(fb);
-	kfree(amdgpu_fb);
-}
-
-static int amdgpu_display_user_framebuffer_create_handle(
-			struct drm_framebuffer *fb,
-			struct drm_file *file_priv,
-			unsigned int *handle)
-{
-	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
-	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
-	.destroy = amdgpu_display_user_framebuffer_destroy,
-	.create_handle = amdgpu_display_user_framebuffer_create_handle,
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
 };
 
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
 {
 	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
 
@@ -526,11 +504,11 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
 			       struct drm_gem_object *obj)
 {
 	int ret;
-	rfb->obj = obj;
+	rfb->base.obj[0] = obj;
 	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret) {
-		rfb->obj = NULL;
+		rfb->base.obj[0] = NULL;
 		return ret;
 	}
 	return 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,7 +23,7 @@
 #ifndef __AMDGPU_DISPLAY_H__
 #define __AMDGPU_DISPLAY_H__
 
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
 struct drm_framebuffer *
 amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 				       struct drm_file *file_priv,
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -115,6 +115,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 	pr_cont("\n");
 }
 
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+{
+	struct drm_device *ddev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+
+	adev->pm.dpm.new_active_crtcs = 0;
+	adev->pm.dpm.new_active_crtc_count = 0;
+	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc,
+				    &ddev->mode_config.crtc_list, head) {
+			amdgpu_crtc = to_amdgpu_crtc(crtc);
+			if (amdgpu_crtc->enabled) {
+				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+				adev->pm.dpm.new_active_crtc_count++;
+			}
+		}
+	}
+}
+
 
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -52,8 +52,6 @@ enum amdgpu_dpm_event_src {
 	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
 };
 
-#define SCLK_DEEP_SLEEP_MASK 0x8
-
 struct amdgpu_ps {
 	u32 caps; /* vbios flags */
 	u32 class; /* vbios flags */
@@ -349,12 +347,6 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
 			(adev)->powerplay.pp_handle, msg_id))
 
-#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \
-			virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \
-		((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \
-			(adev)->powerplay.pp_handle, virtual_addr_low, \
-			virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
-
 #define amdgpu_dpm_get_power_profile_mode(adev, buf) \
 		((adev)->powerplay.pp_funcs->get_power_profile_mode(\
 			(adev)->powerplay.pp_handle, buf))
@@ -445,6 +437,8 @@ struct amdgpu_pm {
 	uint32_t                pcie_gen_mask;
 	uint32_t                pcie_mlw_mask;
 	struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
+	uint32_t                smu_prv_buffer_size;
+	struct amdgpu_bo        *smu_prv_buffer;
 };
 
 #define R600_SSTU_DFLT                               0
@@ -482,6 +476,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 				struct amdgpu_ps *rps);
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
 bool amdgpu_is_uvd_state(u32 class, u32 class2);
 void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u);
@ -75,9 +75,10 @@
|
||||
* - 3.23.0 - Add query for VRAM lost counter
|
||||
* - 3.24.0 - Add high priority compute support for gfx9
|
||||
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
|
||||
* - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
|
||||
*/
|
||||
#define KMS_DRIVER_MAJOR 3
|
||||
#define KMS_DRIVER_MINOR 25
|
||||
#define KMS_DRIVER_MINOR 26
|
||||
#define KMS_DRIVER_PATCHLEVEL 0
|
||||
|
||||
int amdgpu_vram_limit = 0;
|
||||
@ -121,7 +122,7 @@ uint amdgpu_pg_mask = 0xffffffff;
|
||||
uint amdgpu_sdma_phase_quantum = 32;
|
||||
char *amdgpu_disable_cu = NULL;
|
||||
char *amdgpu_virtual_display = NULL;
|
||||
uint amdgpu_pp_feature_mask = 0xffffbfff;
|
||||
uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
|
||||
int amdgpu_ngg = 0;
|
||||
int amdgpu_prim_buf_per_se = 0;
|
||||
int amdgpu_pos_buf_per_se = 0;
|
||||
@ -132,6 +133,7 @@ int amdgpu_lbpw = -1;
|
||||
int amdgpu_compute_multipipe = -1;
|
||||
int amdgpu_gpu_recovery = -1; /* auto */
|
||||
int amdgpu_emu_mode = 0;
|
||||
uint amdgpu_smu_memory_pool_size = 0;
|
||||
|
||||
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
|
||||
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
|
||||
@ -316,6 +318,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
|
||||
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
|
||||
#endif
|
||||
|
||||
MODULE_PARM_DESC(smu_memory_pool_size,
|
||||
"reserve gtt for smu debug usage, 0 = disable,"
|
||||
"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
|
||||
module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
|
||||
|
||||
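The flag encoding above doubles the pool size with each bit; a minimal sketch of how it could map to a byte count (hypothetical helper, not part of this commit; assumes exactly one of the documented bits is set):

/* Hypothetical helper, not in this commit: map the smu_memory_pool_size
 * flag to bytes per the MODULE_PARM_DESC above. ffs() returns the index
 * of the lowest set bit, so 0x1 -> 256MB, 0x2 -> 512MB, 0x4 -> 1GB, 0x8 -> 2GB.
 */
static inline u64 smu_pool_flag_to_bytes(uint flag)
{
	if (!flag)
		return 0; /* pool disabled */
	return 256ULL * 1024 * 1024 << (ffs(flag) - 1);
}
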
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@ -534,6 +541,9 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* VEGAM */
{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},

@ -137,7 +137,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled);
domain = amdgpu_display_framebuffer_domains(adev);
domain = amdgpu_display_supported_domains(adev);

height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
@ -292,9 +292,9 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb

drm_fb_helper_unregister_fbi(&rfbdev->helper);

if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
if (rfb->base.obj[0]) {
amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
rfb->base.obj[0] = NULL;
drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
}
@ -377,7 +377,7 @@ int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
if (!adev->mode_info.rfbdev)
return 0;

robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
size += amdgpu_bo_size(robj);
return size;
}
@ -386,7 +386,7 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
if (!adev->mode_info.rfbdev)
return false;
if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
return true;
return false;
}

@ -131,7 +131,8 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
unsigned flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
@ -149,7 +150,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
adev->fence_context + ring->idx,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
seq, flags | AMDGPU_FENCE_FLAG_INT);

ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
/* This function can't be called concurrently anyway, otherwise

@ -113,12 +113,17 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
int r;

if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
ttm_bo_type_kernel, NULL,
&adev->gart.robj);
struct amdgpu_bo_param bp;

memset(&bp, 0, sizeof(bp));
bp.size = adev->gart.table_size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
if (r) {
return r;
}

@ -48,17 +48,25 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
struct drm_gem_object **obj)
{
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int r;

memset(&bp, 0, sizeof(bp));
*obj = NULL;
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}

bp.size = size;
bp.byte_align = alignment;
bp.type = type;
bp.resv = resv;
bp.preferred_domain = initial_domain;
retry:
r = amdgpu_bo_create(adev, size, alignment, initial_domain,
flags, type, resv, &bo);
bp.flags = flags;
bp.domain = initial_domain;
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
@ -221,12 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;

/* reject invalid gem domains */
if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA))
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;

/* create a gem object to contain this object in */
@ -771,16 +774,23 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag) \
if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
seq_printf((m), " " #flag); \
}

static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
struct drm_gem_object *gobj = ptr;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct seq_file *m = data;

struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
unsigned domain;
const char *placement;
unsigned pin_count;
uint64_t offset;

domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
switch (domain) {
@ -798,13 +808,27 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);

offset = READ_ONCE(bo->tbo.mem.start);
if (offset != AMDGPU_BO_INVALID_OFFSET)
seq_printf(m, " @ 0x%010Lx", offset);

pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);

dma_buf = READ_ONCE(bo->gem_base.dma_buf);
attachment = READ_ONCE(bo->gem_base.import_attach);

if (attachment)
seq_printf(m, " imported from %p", dma_buf);
else if (dma_buf)
seq_printf(m, " exported as %p", dma_buf);

amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

seq_printf(m, "\n");

return 0;

@ -127,6 +127,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_vm *vm;
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
unsigned fence_flags = 0;

unsigned i;
int r = 0;
@ -227,7 +228,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
#endif
amdgpu_asic_invalidate_hdp(adev, ring);

r = amdgpu_fence_emit(ring, f);
if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

r = amdgpu_fence_emit(ring, f, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@ -242,7 +246,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* wrap the last IB with fence */
if (job && job->uf_addr) {
amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT);
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}

if (patch_offset != ~0 && ring->funcs->patch_cond_exec)

@ -31,6 +31,7 @@
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
@ -214,6 +215,18 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->gfx.rlc_fw_version;
fw_info->feature = adev->gfx.rlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
fw_info->ver = adev->gfx.rlc_srlc_fw_version;
fw_info->feature = adev->gfx.rlc_srlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
fw_info->ver = adev->gfx.rlc_srlg_fw_version;
fw_info->feature = adev->gfx.rlc_srlg_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
fw_info->ver = adev->gfx.rlc_srls_fw_version;
fw_info->feature = adev->gfx.rlc_srls_feature_version;
break;
case AMDGPU_INFO_FW_GFX_MEC:
if (query_fw->index == 0) {
fw_info->ver = adev->gfx.mec_fw_version;
@ -279,6 +292,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;

/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->late_init_work);

switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@ -701,10 +717,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
}
case AMDGPU_INFO_SENSOR: {
struct pp_gpu_power query = {0};
int query_size = sizeof(query);

if (amdgpu_dpm == 0)
if (!adev->pm.dpm_enabled)
return -ENOENT;

switch (info->sensor_info.type) {
@ -746,10 +759,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
/* get average GPU power */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &query_size)) {
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 = query.average_gpu_power >> 8;
ui32 >>= 8;
break;
case AMDGPU_INFO_SENSOR_VDDNB:
/* get VDDNB in millivolts */
@ -913,8 +926,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
return;

pm_runtime_get_sync(dev->dev);

amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);

if (adev->asic_type != CHIP_RAVEN) {
amdgpu_uvd_free_handles(adev, file_priv);
@ -935,6 +947,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

amdgpu_vm_fini(adev, &fpriv->vm);
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
amdgpu_bo_unref(&pd);
@ -1088,6 +1102,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
int ret, i;

/* VCE */
@ -1146,6 +1161,30 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);

/* RLC SAVE RESTORE LIST CNTL */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);

/* RLC SAVE RESTORE LIST GPM MEM */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);

/* RLC SAVE RESTORE LIST SRM MEM */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);

/* MEC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
query_fw.index = 0;
@ -1210,6 +1249,9 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);

seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

return 0;
}

@ -308,7 +308,6 @@ struct amdgpu_display_funcs {

struct amdgpu_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;

/* caching for later use */
uint64_t address;

@ -191,14 +191,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr)
{
struct amdgpu_bo_param bp;
bool free = false;
int r;

memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = align;
bp.domain = domain;
bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;

if (!*bo_ptr) {
r = amdgpu_bo_create(adev, size, align, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
ttm_bo_type_kernel, NULL, bo_ptr);
r = amdgpu_bo_create(adev, &bp, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r);
@ -341,27 +348,25 @@ fail:
return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
int byte_align, u32 domain,
u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
{
struct ttm_operation_ctx ctx = {
.interruptible = (type != ttm_bo_type_kernel),
.interruptible = (bp->type != ttm_bo_type_kernel),
.no_wait_gpu = false,
.resv = resv,
.resv = bp->resv,
.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
};
struct amdgpu_bo *bo;
unsigned long page_align;
unsigned long page_align, size = bp->size;
size_t acc_size;
int r;

page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);

if (!amdgpu_bo_validate_size(adev, size, domain))
if (!amdgpu_bo_validate_size(adev, size, bp->domain))
return -ENOMEM;

*bo_ptr = NULL;
@ -375,18 +380,14 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU |
AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
bp->domain;
bo->allowed_domains = bo->preferred_domains;
if (type != ttm_bo_type_kernel &&
if (bp->type != ttm_bo_type_kernel &&
bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

bo->flags = flags;
bo->flags = bp->flags;

#ifdef CONFIG_X86_32
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
@ -417,11 +418,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
#endif

bo->tbo.bdev = &adev->mman.bdev;
amdgpu_ttm_placement_from_domain(bo, domain);
amdgpu_ttm_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;

r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
NULL, resv, &amdgpu_ttm_bo_destroy);
NULL, bp->resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;

@ -433,10 +436,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

if (type == ttm_bo_type_kernel)
bo->tbo.priority = 1;

if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;

@ -449,20 +449,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
bo->tbo.moving = dma_fence_get(fence);
dma_fence_put(fence);
}
if (!resv)
if (!bp->resv)
amdgpu_bo_unreserve(bo);
*bo_ptr = bo;

trace_amdgpu_bo_create(bo);

/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
if (type == ttm_bo_type_device)
if (bp->type == ttm_bo_type_device)
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

return 0;

fail_unreserve:
if (!resv)
if (!bp->resv)
ww_mutex_unlock(&bo->tbo.resv->lock);
amdgpu_bo_unref(&bo);
return r;
@ -472,16 +472,22 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align,
struct amdgpu_bo *bo)
{
struct amdgpu_bo_param bp;
int r;

if (bo->shadow)
return 0;

r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
ttm_bo_type_kernel,
bo->tbo.resv, &bo->shadow);
memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = byte_align;
bp.domain = AMDGPU_GEM_DOMAIN_GTT;
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW;
bp.type = ttm_bo_type_kernel;
bp.resv = bo->tbo.resv;

r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@ -492,28 +498,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}

int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
int byte_align, u32 domain,
u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
{
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
u64 flags = bp->flags;
int r;

r = amdgpu_bo_do_create(adev, size, byte_align, domain,
parent_flags, type, resv, bo_ptr);
bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
r = amdgpu_bo_do_create(adev, bp, bo_ptr);
if (r)
return r;

if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
if (!resv)
if (!bp->resv)
WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
NULL));

r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

if (!resv)
if (!bp->resv)
reservation_object_unlock((*bo_ptr)->tbo.resv);

if (r)
@ -689,8 +693,21 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;

/* A shared bo cannot be migrated to VRAM */
if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
if (bo->prime_shared_count) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
return -EINVAL;
}

/* This assumes only APU display buffers are pinned with (VRAM|GTT).
* See function amdgpu_display_supported_domains()
*/
if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
domain = AMDGPU_GEM_DOMAIN_GTT;
}

if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
@ -838,6 +855,13 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
return amdgpu_ttm_init(adev);
}

int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
amdgpu_ttm_late_init(adev);

return 0;
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);

@ -33,6 +33,16 @@

#define AMDGPU_BO_INVALID_OFFSET LONG_MAX

struct amdgpu_bo_param {
unsigned long size;
int byte_align;
u32 domain;
u32 preferred_domain;
u64 flags;
enum ttm_bo_type type;
struct reservation_object *resv;
};

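Every allocation path in this series now fills this struct and passes it to amdgpu_bo_create(); a minimal caller sketch following the pattern used in the GART and shadow-BO hunks above (the identifiers come from this diff, the specific size and domain values are only illustrative):

struct amdgpu_bo_param bp;
struct amdgpu_bo *bo;
int r;

memset(&bp, 0, sizeof(bp));	/* zeroed fields (e.g. preferred_domain) keep their defaults */
bp.size = 4096;			/* illustrative size and alignment */
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_GTT;
bp.flags = 0;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;

r = amdgpu_bo_create(adev, &bp, &bo);
if (r)
	return r;
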
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va *bo_va;
@ -195,6 +205,27 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
}
}

/**
* amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
*/
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
struct drm_mm_node *node = bo->tbo.mem.mm_node;
unsigned long pages_left;

if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
return false;

for (pages_left = bo->tbo.mem.num_pages; pages_left;
pages_left -= node->size, node++)
if (node->start < fpfn)
return true;

return false;
}

/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
@ -203,10 +234,8 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
int byte_align, u32 domain,
u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
@ -230,6 +259,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma);

@ -77,6 +77,37 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
}
}

/**
* DOC: power_dpm_state
*
* This is a legacy interface and is only provided for backwards compatibility.
* The amdgpu driver provides a sysfs API for adjusting certain power
* related parameters. The file power_dpm_state is used for this.
* It accepts the following arguments:
* - battery
* - balanced
* - performance
*
* battery
*
* On older GPUs, the vbios provided a special power state for battery
* operation. Selecting battery switched to this state. This is no
* longer provided on newer GPUs so the option does nothing in that case.
*
* balanced
*
* On older GPUs, the vbios provided a special power state for balanced
* operation. Selecting balanced switched to this state. This is no
* longer provided on newer GPUs so the option does nothing in that case.
*
* performance
*
* On older GPUs, the vbios provided a special power state for performance
* operation. Selecting performance switched to this state. This is no
* longer provided on newer GPUs so the option does nothing in that case.
*
*/

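These sysfs files take plain text commands. A small userspace sketch, not part of this commit, that writes one value to such a file; the card0 path is an assumption and varies per system. The later examples reuse this hypothetical helper:

/* Userspace sketch: write one command string to an amdgpu sysfs file.
 * Hypothetical helper for the examples below; error handling kept minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int amdgpu_sysfs_write(const char *file, const char *cmd)
{
	char path[256];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/sys/class/drm/card0/device/%s", file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) < 0)	/* one write per command */
		ret = -1;
	close(fd);
	return ret;
}

/* e.g. amdgpu_sysfs_write("power_dpm_state", "performance\n"); */
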
static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -131,6 +162,59 @@ fail:
return count;
}


/**
* DOC: power_dpm_force_performance_level
*
* The amdgpu driver provides a sysfs API for adjusting certain power
* related parameters. The file power_dpm_force_performance_level is
* used for this. It accepts the following arguments:
* - auto
* - low
* - high
* - manual
* - GPU fan
* - profile_standard
* - profile_min_sclk
* - profile_min_mclk
* - profile_peak
*
* auto
*
* When auto is selected, the driver will attempt to dynamically select
* the optimal power profile for current conditions in the driver.
*
* low
*
* When low is selected, the clocks are forced to the lowest power state.
*
* high
*
* When high is selected, the clocks are forced to the highest power state.
*
* manual
*
* When manual is selected, the user can manually adjust which power states
* are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
* and pp_dpm_pcie files and adjust the power state transition heuristics
* via the pp_power_profile_mode sysfs file.
*
* profile_standard
* profile_min_sclk
* profile_min_mclk
* profile_peak
*
* When the profiling modes are selected, clock and power gating are
* disabled and the clocks are set for different profiling cases. This
* mode is recommended for profiling specific work loads where you do
* not want clock or power gating for clock fluctuation to interfere
* with your results. profile_standard sets the clocks to a fixed clock
* level which varies from asic to asic. profile_min_sclk forces the sclk
* to the lowest level. profile_min_mclk forces the mclk to the lowest level.
* profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
*
*/

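Manual mode gates the per-level files described below; with the hypothetical amdgpu_sysfs_write() helper from the power_dpm_state example, selecting it looks like:

/* select manual control before using pp_dpm_* or pp_power_profile_mode */
amdgpu_sysfs_write("power_dpm_force_performance_level", "manual\n");
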
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -324,6 +408,17 @@ fail:
return count;
}

/**
* DOC: pp_table
*
* The amdgpu driver provides a sysfs API for uploading new powerplay
* tables. The file pp_table is used for this. Reading the file
* will dump the current power play table. Writing to the file
* will attempt to upload a new powerplay table and re-initialize
* powerplay using that new table.
*
*/

static ssize_t amdgpu_get_pp_table(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -360,6 +455,29 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
return count;
}

/**
* DOC: pp_od_clk_voltage
*
* The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
* in each power level within a power state. The pp_od_clk_voltage is used for
* this.
*
* Reading the file will display:
* - a list of engine clock levels and voltages labeled OD_SCLK
* - a list of memory clock levels and voltages labeled OD_MCLK
* - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
*
* To manually adjust these settings, first select manual using
* power_dpm_force_performance_level. Enter a new value for each
* level by writing a string that contains "s/m level clock voltage" to
* the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
* at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
* 810 mV. When you have edited all of the states as needed, write
* "c" (commit) to the file to commit your changes. If you want to reset to the
* default power levels, write "r" (reset) to the file to reset them.
*
*/

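A userspace walk-through of the sequence the DOC describes, reusing the hypothetical amdgpu_sysfs_write() helper from the power_dpm_state example (each command is a separate write):

/* overclock sclk level 1 to 500 MHz at 820 mV, then commit */
amdgpu_sysfs_write("power_dpm_force_performance_level", "manual\n");
amdgpu_sysfs_write("pp_od_clk_voltage", "s 1 500 820\n");
amdgpu_sysfs_write("pp_od_clk_voltage", "c\n");
/* or roll everything back to the defaults */
amdgpu_sysfs_write("pp_od_clk_voltage", "r\n");
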
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
struct device_attribute *attr,
const char *buf,
@ -437,6 +555,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
return size;
} else {
return snprintf(buf, PAGE_SIZE, "\n");
@ -444,6 +563,23 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,

}

/**
* DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
* and pp_dpm_pcie are used for this.
*
* Reading back the files will show you the available power levels within
* the power state and the clock information for those levels.
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
* Then enable the desired set of levels by writing their indices to the
* file, e.g. "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels 4, 5, and 6.
*/

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -466,14 +602,17 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
uint32_t mask = 0;
char *sub_str = NULL;
char *tmp;
char buf_cpy[count];
const char delimiter[3] = {' ', '\n', '\0'};

for (i = 0; i < strlen(buf); i++) {
if (*(buf + i) == '\n')
continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
memcpy(buf_cpy, buf, count+1);
tmp = buf_cpy;
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);

if (ret) {
@ -481,8 +620,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
goto fail;
}
mask |= 1 << level;
} else
break;
}

if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

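The same strsep() parsing pattern repeats in the mclk and pcie handlers below; restated as a standalone sketch for clarity (the caller must pass a writable, NUL-terminated copy of the input, as the handlers do with buf_cpy):

/* Sketch of the parsing above: turn a level string such as "4 5 6"
 * into a bitmask with bits 4, 5 and 6 set.
 */
static int parse_level_mask(char *buf, uint32_t *mask)
{
	const char delimiter[3] = {' ', '\n', '\0'};
	char *sub_str, *tmp = buf;
	long level;
	int ret;

	*mask = 0;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (!strlen(sub_str))
			break;
		ret = kstrtol(sub_str, 0, &level);
		if (ret)
			return ret;
		*mask |= 1 << level;
	}
	return 0;
}
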
@ -512,14 +652,17 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
uint32_t mask = 0;
char *sub_str = NULL;
char *tmp;
char buf_cpy[count];
const char delimiter[3] = {' ', '\n', '\0'};

for (i = 0; i < strlen(buf); i++) {
if (*(buf + i) == '\n')
continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
memcpy(buf_cpy, buf, count+1);
tmp = buf_cpy;
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);

if (ret) {
@ -527,6 +670,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
goto fail;
}
mask |= 1 << level;
} else
break;
}
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@ -557,14 +702,18 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
uint32_t mask = 0;
char *sub_str = NULL;
char *tmp;
char buf_cpy[count];
const char delimiter[3] = {' ', '\n', '\0'};

for (i = 0; i < strlen(buf); i++) {
if (*(buf + i) == '\n')
continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
memcpy(buf_cpy, buf, count+1);
tmp = buf_cpy;

while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);

if (ret) {
@ -572,6 +721,8 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
goto fail;
}
mask |= 1 << level;
} else
break;
}
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
@ -668,6 +819,26 @@ fail:
return count;
}

/**
* DOC: pp_power_profile_mode
*
* The amdgpu driver provides a sysfs API for adjusting the heuristics
* related to switching between power levels in a power state. The file
* pp_power_profile_mode is used for this.
*
* Reading this file outputs a list of all of the predefined power profiles
* and the relevant heuristics settings for that profile.
*
* To select a profile or create a custom profile, first select manual using
* power_dpm_force_performance_level. Writing the number of a predefined
* profile to pp_power_profile_mode will enable those heuristics. To
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families
* the heuristic parameters vary from family to family.
*
*/

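With the same hypothetical helper, selecting a predefined profile after reading the file to learn the numbering (profile indices and heuristic parameters are asic-specific):

/* enable predefined profile 1; numbering comes from reading the file */
amdgpu_sysfs_write("pp_power_profile_mode", "1\n");
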
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -1020,8 +1191,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev;
struct pp_gpu_power query = {0};
int r, size = sizeof(query);
u32 query = 0;
int r, size = sizeof(u32);
unsigned uw;

/* Can't get power when the card is off */
@ -1041,7 +1212,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
return r;

/* convert to microwatts */
uw = (query.average_gpu_power >> 8) * 1000000;
uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}

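A worked example of the conversion above, using a hypothetical raw reading:

/* query = 0x2880: upper bits 0x28 = 40 W, low byte 0x80 = 128, so
 * uw = 40 * 1000000 + 128 * 1000 = 40128000 microwatts,
 * which hwmon reports as 40.128 W (matching the debugfs "%u.%u W" print).
 */
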
@ -1109,6 +1280,46 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return count;
}


/**
* DOC: hwmon
*
* The amdgpu driver exposes the following sensor interfaces:
* - GPU temperature (via the on-die sensor)
* - GPU voltage
* - Northbridge voltage (APUs only)
* - GPU power
* - GPU fan
*
* hwmon interfaces for GPU temperature:
* - temp1_input: the on die GPU temperature in millidegrees Celsius
* - temp1_crit: temperature critical max value in millidegrees Celsius
* - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
*
* hwmon interfaces for GPU voltage:
* - in0_input: the voltage on the GPU in millivolts
* - in1_input: the voltage on the Northbridge in millivolts
*
* hwmon interfaces for GPU power:
* - power1_average: average power used by the GPU in microWatts
* - power1_cap_min: minimum cap supported in microWatts
* - power1_cap_max: maximum cap supported in microWatts
* - power1_cap: selected power cap in microWatts
*
* hwmon interfaces for GPU fan:
* - pwm1: pulse width modulation fan level (0-255)
* - pwm1_enable: pulse width modulation fan control method
* 0: no fan speed control
* 1: manual fan speed control using pwm interface
* 2: automatic fan speed control
* - pwm1_min: pulse width modulation fan control minimum level (0)
* - pwm1_max: pulse width modulation fan control maximum level (255)
* - fan1_input: fan speed in RPM
*
* You can use hwmon tools like sensors to view this information on your system.
*
*/

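A userspace sketch, not part of this commit, that reads one of the sensors listed above; the hwmon0 index is an assumption and varies per system:

/* read the on-die GPU temperature through the hwmon interface */
#include <stdio.h>

int main(void)
{
	unsigned int millideg;
	FILE *f = fopen("/sys/class/drm/card0/device/hwmon/hwmon0/temp1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &millideg) != 1) {	/* value is in millidegrees Celsius */
		fclose(f);
		return 1;
	}
	printf("GPU temp: %u.%03u C\n", millideg / 1000, millideg % 1000);
	fclose(f);
	return 0;
}
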
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@ -1153,19 +1364,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;

/* handle non-powerplay limitations */
if (!adev->powerplay.pp_handle) {

/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
return 0;
/* requires powerplay */
if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
return 0;
}

/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
@ -1658,9 +1864,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)

void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
int i = 0;

if (!adev->pm.dpm_enabled)
@ -1676,21 +1879,25 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
}

if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
if (adev->pm.pm_display_cfg.vrefresh > 120)
adev->pm.pm_display_cfg.min_vblank_time = 0;
if (adev->powerplay.pp_funcs->display_configuration_change)
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
mutex_unlock(&adev->pm.mutex);
}
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0;
adev->pm.dpm.new_active_crtc_count = 0;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
}
}
amdgpu_dpm_get_active_displays(adev);
/* update battery/ac status */
if (power_supply_is_system_supplied() > 0)
adev->pm.dpm.ac_power = true;
@ -1711,7 +1918,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t value;
struct pp_gpu_power query = {0};
uint32_t query = 0;
int size;

/* sanity check PP is enabled */
@ -1734,17 +1941,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
seq_printf(m, "\t%u mV (VDDGFX)\n", value);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
size = sizeof(query);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
query.vddc_power & 0xff);
seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
query.vddci_power & 0xff);
seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
query.max_gpu_power & 0xff);
seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
query.average_gpu_power & 0xff);
}
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");

@ -102,12 +102,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct reservation_object *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;

memset(&bp, 0, sizeof(bp));
bp.size = attach->dmabuf->size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.flags = 0;
bp.type = ttm_bo_type_sg;
bp.resv = resv;
ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
resv, &bo);
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret)
goto error;

@ -209,7 +215,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { true, false };
u32 domain = amdgpu_display_framebuffer_domains(adev);
u32 domain = amdgpu_display_supported_domains(adev);
int ret;
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);

@ -459,6 +459,26 @@ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
spin_unlock(&adev->ring_lru_list_lock);
}

/**
* amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
*
* @adev: amdgpu_device pointer
* @reg0: register to write
* @reg1: register to wait on
* @ref: reference value to write/wait on
* @mask: mask to wait on
*
* Helper for rings that don't support write and wait in a
* single oneshot packet.
*/
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
amdgpu_ring_emit_wreg(ring, reg0, ref);
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/*
* Debugfs info
*/

@ -42,6 +42,7 @@

#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)

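The new flag is threaded through amdgpu_fence_emit(); restating the amdgpu_ib_schedule() hunk above as a minimal usage sketch:

unsigned fence_flags = 0;

/* TC writeback without invalidate, per AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE */
if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
	fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

r = amdgpu_fence_emit(ring, &fence, fence_flags);
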
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX,
@ -90,7 +91,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@ -154,6 +156,9 @@ struct amdgpu_ring_funcs {
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
@ -228,6 +233,10 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
int *blacklist, int num_blacklist,
bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t val0,
uint32_t reg1, uint32_t val1);

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
int i = 0;

@ -33,6 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *vram_obj = NULL;
struct amdgpu_bo **gtt_obj = NULL;
struct amdgpu_bo_param bp;
uint64_t gart_addr, vram_addr;
unsigned n, size;
int i, r;
@ -58,9 +59,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = 1;
goto out_cleanup;
}
memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
bp.flags = 0;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;

r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
ttm_bo_type_kernel, NULL, &vram_obj);
r = amdgpu_bo_create(adev, &bp, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@ -79,9 +86,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void **vram_start, **vram_end;
struct dma_fence *fence = NULL;

r = amdgpu_bo_create(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, 0,
ttm_bo_type_kernel, NULL, gtt_obj + i);
bp.domain = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;

@ -275,7 +275,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
),

TP_fast_assign(
__entry->bo = bo_va->base.bo;
__entry->bo = bo_va ? bo_va->base.bo : NULL;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;

@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
rq, amdgpu_sched_jobs, NULL);
rq, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
goto error_entity;
@ -223,20 +223,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
if (!adev->mman.buffer_funcs_enabled) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
struct drm_mm_node *node = bo->mem.mm_node;
unsigned long pages_left;

for (pages_left = bo->mem.num_pages;
pages_left;
pages_left -= node->size, node++) {
if (node->start < fpfn)
break;
}

if (!pages_left)
goto gtt;
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {

/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@ -245,12 +233,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
*/
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
abo->placements[0].fpfn = fpfn;
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
abo->placement.busy_placement = &abo->placements[1];
abo->placement.num_busy_placement = 1;
} else {
gtt:
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
@ -856,6 +843,45 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
sg_free_table(ttm->sg);
}

int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct ttm_buffer_object *tbo,
uint64_t flags)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;

if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
uint64_t page_idx = 1;

r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
ttm->pages, gtt->ttm.dma_address, flags);
if (r)
goto gart_bind_fail;

/* Patch mtype of the second part BO */
flags &= ~AMDGPU_PTE_MTYPE_MASK;
flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);

r = amdgpu_gart_bind(adev,
gtt->offset + (page_idx << PAGE_SHIFT),
ttm->num_pages - page_idx,
&ttm->pages[page_idx],
&(gtt->ttm.dma_address[page_idx]), flags);
} else {
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
}

gart_bind_fail:
if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);

return r;
}

static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
@ -929,8 +955,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)

flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
gtt->offset = (u64)tmp.start << PAGE_SHIFT;
r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
bo->ttm->pages, gtt->ttm.dma_address, flags);
r = amdgpu_ttm_gart_bind(adev, bo, flags);
if (unlikely(r)) {
ttm_bo_mem_put(bo, &tmp);
return r;
@ -947,19 +972,15 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
uint64_t flags;
int r;

if (!gtt)
if (!tbo->ttm)
return 0;
flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
|
||||
r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
|
||||
gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
|
||||
if (r)
|
||||
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
|
||||
gtt->ttm.ttm.num_pages, gtt->offset);
|
||||
flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
|
||||
r = amdgpu_ttm_gart_bind(adev, tbo, flags);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1349,6 +1370,7 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
|
||||
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct ttm_operation_ctx ctx = { false, false };
|
||||
struct amdgpu_bo_param bp;
|
||||
int r = 0;
|
||||
int i;
|
||||
u64 vram_size = adev->gmc.visible_vram_size;
|
||||
@ -1356,17 +1378,21 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
|
||||
u64 size = adev->fw_vram_usage.size;
|
||||
struct amdgpu_bo *bo;
|
||||
|
||||
memset(&bp, 0, sizeof(bp));
|
||||
bp.size = adev->fw_vram_usage.size;
|
||||
bp.byte_align = PAGE_SIZE;
|
||||
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
|
||||
bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
|
||||
bp.type = ttm_bo_type_kernel;
|
||||
bp.resv = NULL;
|
||||
adev->fw_vram_usage.va = NULL;
|
||||
adev->fw_vram_usage.reserved_bo = NULL;
|
||||
|
||||
if (adev->fw_vram_usage.size > 0 &&
|
||||
adev->fw_vram_usage.size <= vram_size) {
|
||||
|
||||
r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
ttm_bo_type_kernel, NULL,
|
||||
r = amdgpu_bo_create(adev, &bp,
|
||||
&adev->fw_vram_usage.reserved_bo);
|
||||
if (r)
|
||||
goto error_create;
|
||||
@ -1474,12 +1500,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}

if (adev->gmc.stolen_size) {
r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory,
NULL, NULL);
if (r)
return r;
}
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

@ -1548,13 +1576,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return 0;
}

void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
if (!adev->mman.initialized)
return;

amdgpu_ttm_debugfs_fini(adev);
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
@ -77,6 +77,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_late_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
bool enable);
@ -161,8 +161,38 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
if (version_minor == 1) {
const struct rlc_firmware_header_v2_1 *v2_1 =
container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
}
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
}
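
The version check and container_of() pattern above work because the v2.1 header embeds the v2.0 header as its first member, so a pointer into the firmware blob can be viewed as either. In isolation (a sketch; fw stands in for the loaded firmware):

const struct rlc_firmware_header_v2_0 *rlc_hdr =
	(const struct rlc_firmware_header_v2_0 *)fw->data;

if (le16_to_cpu(rlc_hdr->header.header_version_minor) == 1) {
	/* safe: v2_0 is the first field of rlc_firmware_header_v2_1 */
	const struct rlc_firmware_header_v2_1 *v2_1 =
		container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
	/* ... v2.1-only fields are now reachable through v2_1 ... */
}
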
@ -265,6 +295,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
@ -307,7 +338,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
(ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT)) {
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);

memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@ -329,6 +363,18 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
le32_to_cpu(header->ucode_array_offset_bytes) +
le32_to_cpu(cp_hdr->jt_offset) * 4),
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
ucode->ucode_size);
}

return 0;
@ -98,6 +98,24 @@ struct rlc_firmware_header_v2_0 {
uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
};

/* version_major=2, version_minor=1 */
struct rlc_firmware_header_v2_1 {
struct rlc_firmware_header_v2_0 v2_0;
uint32_t reg_list_format_direct_reg_list_length; /* length of direct reg list format array */
uint32_t save_restore_list_cntl_ucode_ver;
uint32_t save_restore_list_cntl_feature_ver;
uint32_t save_restore_list_cntl_size_bytes;
uint32_t save_restore_list_cntl_offset_bytes;
uint32_t save_restore_list_gpm_ucode_ver;
uint32_t save_restore_list_gpm_feature_ver;
uint32_t save_restore_list_gpm_size_bytes;
uint32_t save_restore_list_gpm_offset_bytes;
uint32_t save_restore_list_srm_ucode_ver;
uint32_t save_restore_list_srm_feature_ver;
uint32_t save_restore_list_srm_size_bytes;
uint32_t save_restore_list_srm_offset_bytes;
};

/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
struct common_firmware_header header;
@ -148,6 +166,7 @@ union amdgpu_firmware_header {
struct gfx_firmware_header_v1_0 gfx;
struct rlc_firmware_header_v1_0 rlc;
struct rlc_firmware_header_v2_0 rlc_v2_0;
struct rlc_firmware_header_v2_1 rlc_v2_1;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct gpu_info_firmware_header_v1_0 gpu_info;
@ -168,6 +187,9 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_CP_MEC2_JT,
AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
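
Each *_offset_bytes/*_size_bytes pair in rlc_firmware_header_v2_1 describes one payload inside the same firmware blob, measured from the start of the header. Locating a payload is then plain pointer arithmetic; a sketch, mirroring the gfx_v9_0 parsing shown later in this diff:

const u8 *blob = (const u8 *)v2_1;      /* start of the v2.1 header */
const u8 *srm_list = blob +
	le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes);
u32 srm_size = le32_to_cpu(v2_1->save_restore_list_srm_size_bytes);
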
@ -66,6 +66,7 @@
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
@ -109,6 +110,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
@ -172,6 +174,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_VEGA12:
fw_name = FIRMWARE_VEGA12;
break;
case CHIP_VEGAM:
fw_name = FIRMWARE_VEGAM;
break;
default:
return -EINVAL;
}
@ -237,7 +242,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
ring = &adev->uvd.ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
rq, amdgpu_sched_jobs, NULL);
rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD run queue.\n");
return r;
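
This hunk, and the matching ones for VCE, VCN and the VM page-table entity below, track a GPU scheduler API change: drm_sched_entity_init() no longer takes a per-entity job-queue depth (previously passed as amdgpu_sched_jobs). The new call shape, with entity as a placeholder:

struct drm_sched_rq *rq =
	&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];

/* queue depth is now managed by the scheduler core; only the
 * scheduler, entity, run queue and guilty pointer remain */
r = drm_sched_entity_init(&ring->sched, &entity, rq, NULL);
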
@ -53,6 +53,7 @@
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM "amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
@ -71,6 +72,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
@ -132,6 +134,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS12:
fw_name = FIRMWARE_POLARIS12;
break;
case CHIP_VEGAM:
fw_name = FIRMWARE_VEGAM;
break;
case CHIP_VEGA10:
fw_name = FIRMWARE_VEGA10;
break;
@ -181,7 +186,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
ring = &adev->vce.ring[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
rq, amdgpu_sched_jobs, NULL);
rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
@ -755,6 +760,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
if (r)
goto out;
break;

case 0x0500000d: /* MV buffer */
r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
idx + 2, 0, 0);
if (r)
goto out;

r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
idx + 7, 0, 0);
if (r)
goto out;
break;
}

idx += len / 4;
@ -860,6 +877,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;

case 0x0500000d: /* MV buffer */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
idx + 2, *size, 0);
if (r)
goto out;

r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
idx + 7, *size / 12, 0);
if (r)
goto out;
break;

default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
r = -EINVAL;
@ -105,7 +105,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
ring = &adev->vcn.ring_dec;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
rq, amdgpu_sched_jobs, NULL);
rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN dec run queue.\n");
return r;
@ -114,7 +114,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
ring = &adev->vcn.ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
rq, amdgpu_sched_jobs, NULL);
rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN enc run queue.\n");
return r;
@ -94,6 +94,36 @@ struct amdgpu_prt_cb {
struct dma_fence_cb cb;
};

static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
{
base->vm = vm;
base->bo = bo;
INIT_LIST_HEAD(&base->bo_list);
INIT_LIST_HEAD(&base->vm_status);

if (!bo)
return;
list_add_tail(&base->bo_list, &bo->va);

if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
return;

if (bo->preferred_domains &
amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
return;

/*
* we checked all the prerequisites, but it looks like this per vm bo
* is currently evicted. add the bo to the evicted list to make sure it
* is validated on next vm use to avoid fault.
* */
spin_lock(&vm->status_lock);
list_move_tail(&base->vm_status, &vm->evicted);
spin_unlock(&vm->status_lock);
}

/**
* amdgpu_vm_level_shift - return the addr shift for each level
*
@ -412,11 +442,16 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
struct amdgpu_bo *pt;

if (!entry->base.bo) {
r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level),
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, flags,
ttm_bo_type_kernel, resv, &pt);
struct amdgpu_bo_param bp;

memset(&bp, 0, sizeof(bp));
bp.size = amdgpu_vm_bo_size(adev, level);
bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
bp.flags = flags;
bp.type = ttm_bo_type_kernel;
bp.resv = resv;
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
return r;

@ -441,11 +476,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
*/
pt->parent = amdgpu_bo_ref(parent->base.bo);

entry->base.vm = vm;
entry->base.bo = pt;
list_add_tail(&entry->base.bo_list, &pt->va);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
spin_lock(&vm->status_lock);
list_add(&entry->base.vm_status, &vm->relocated);
list_move(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
}
@ -628,7 +661,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

if (vm_flush_needed || pasid_mapping_needed) {
r = amdgpu_fence_emit(ring, &fence);
r = amdgpu_fence_emit(ring, &fence, 0);
if (r)
return r;
}
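
amdgpu_fence_emit() grows a flags argument here; existing callers that need no special behaviour pass 0. The updated signature, as inferred from this call site (not quoted from the header itself):

int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags);
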
@ -1557,6 +1590,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,

spin_lock(&vm->status_lock);
list_del_init(&bo_va->base.vm_status);

/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
*/
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
!(bo->preferred_domains &
amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
list_add_tail(&bo_va->base.vm_status, &vm->evicted);
spin_unlock(&vm->status_lock);

list_splice_init(&bo_va->invalids, &bo_va->valids);
@ -1827,36 +1869,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) {
return NULL;
}
bo_va->base.vm = vm;
bo_va->base.bo = bo;
INIT_LIST_HEAD(&bo_va->base.bo_list);
INIT_LIST_HEAD(&bo_va->base.vm_status);
amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);

bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);

if (!bo)
return bo_va;

list_add_tail(&bo_va->base.bo_list, &bo->va);

if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
return bo_va;

if (bo->preferred_domains &
amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
return bo_va;

/*
* We checked all the prerequisites, but it looks like this per VM BO
* is currently evicted. add the BO to the evicted list to make sure it
* is validated on next VM use to avoid fault.
* */
spin_lock(&vm->status_lock);
list_move_tail(&bo_va->base.vm_status, &vm->evicted);
spin_unlock(&vm->status_lock);

return bo_va;
}
@ -2234,6 +2252,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
{
struct amdgpu_vm_bo_base *bo_base;

/* shadow bo doesn't have bo base, its validation needs its parent */
if (bo->parent && bo->parent->shadow == bo)
bo = bo->parent;

list_for_each_entry(bo_base, &bo->va, bo_list) {
struct amdgpu_vm *vm = bo_base->vm;

@ -2355,6 +2377,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid)
{
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
@ -2380,7 +2404,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring = adev->vm_manager.vm_pte_rings[ring_instance];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
r = drm_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs, NULL);
rq, NULL);
if (r)
return r;

@ -2409,24 +2433,28 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
flags |= AMDGPU_GEM_CREATE_SHADOW;

size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
ttm_bo_type_kernel, NULL, &vm->root.base.bo);
memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = align;
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
bp.flags = flags;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
r = amdgpu_bo_create(adev, &bp, &root);
if (r)
goto error_free_sched_entity;

r = amdgpu_bo_reserve(vm->root.base.bo, true);
r = amdgpu_bo_reserve(root, true);
if (r)
goto error_free_root;

r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
if (r)
goto error_unreserve;

vm->root.base.vm = vm;
list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
list_add_tail(&vm->root.base.vm_status, &vm->evicted);
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
amdgpu_bo_unreserve(vm->root.base.bo);

if (pasid) {
@ -75,11 +75,12 @@ struct amdgpu_bo_list_entry;
/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)

/* VEGA10 only */

/* For GFX9 */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)

/* For Raven */
#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
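
AMDGPU_PTE_MTYPE() places a 2-bit memory type into bits 57-58 of a GFX9 PTE, and AMDGPU_PTE_MTYPE_MASK covers that whole field. Selecting the Raven cache-coherent type would look like this (illustrative only):

uint64_t pte = flags;

pte &= ~AMDGPU_PTE_MTYPE_MASK;            /* clear bits 57-58 */
pte |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC); /* cache-coherent on Raven */
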
@ -5903,7 +5903,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;

if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@ -6255,7 +6255,7 @@ static int ci_dpm_late_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!amdgpu_dpm)
if (!adev->pm.dpm_enabled)
return 0;

/* init the sysfs and debugfs files late */
@ -1735,6 +1735,12 @@ static void cik_invalidate_hdp(struct amdgpu_device *adev,
}
}

static bool cik_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we support soft reset */
return true;
}

static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@ -1748,6 +1754,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_config_memsize = &cik_get_config_memsize,
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
.need_full_reset = &cik_need_full_reset,
};

static int cik_common_early_init(void *handle)
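
cik_need_full_reset() is the CIK backend of a new per-ASIC hook: the recovery path can now ask whether a soft reset would suffice before tearing the whole adapter down. A caller would look roughly like this (a sketch; the surrounding code is not part of this excerpt):

/* choose the reset strategy based on the ASIC callback */
if (adev->asic_funcs->need_full_reset(adev)) {
	/* soft reset not supported on this ASIC; do a full reset */
}
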
@ -1823,7 +1823,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@ -1842,18 +1841,15 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}

if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
if (atomic)
target_fb = fb;
} else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
else
target_fb = crtc->primary->fb;
}

/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
obj = amdgpu_fb->obj;
obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@ -2043,8 +2039,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@ -2526,11 +2521,9 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;

amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
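
The display hunks here and in the dce_v11_0/dce_v6_0/dce_v8_0/dce_virtual files below are the same mechanical conversion: the GEM object used to live in the driver-private struct amdgpu_framebuffer, and core DRM now stores it in drm_framebuffer's obj[] array, so the to_amdgpu_framebuffer() detour disappears. Before and after, as taken from the hunks:

/* before: driver-private wrapper held the GEM object */
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);

/* after: plane 0 object comes straight from the core framebuffer */
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
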
@ -173,6 +173,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
ARRAY_SIZE(polaris11_golden_settings_a11));
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
polaris10_golden_settings_a11,
ARRAY_SIZE(polaris10_golden_settings_a11));
@ -473,6 +474,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
num_crtc = 2;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
num_crtc = 6;
break;
case CHIP_POLARIS11:
@ -1445,6 +1447,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.num_pins = 7;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
@ -1862,7 +1865,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@ -1881,18 +1883,15 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}

if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
if (atomic)
target_fb = fb;
} else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
else
target_fb = crtc->primary->fb;
}

/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
obj = amdgpu_fb->obj;
obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@ -2082,8 +2081,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@ -2253,7 +2251,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)

if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@ -2601,11 +2600,9 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;

amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@ -2673,7 +2670,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,

if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
@ -2830,6 +2828,7 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
@ -2949,7 +2948,8 @@ static int dce_v11_0_hw_init(void *handle)
amdgpu_atombios_encoder_init_dig(adev);
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,
@ -1780,7 +1780,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@ -1798,18 +1797,15 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}

if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
if (atomic)
target_fb = fb;
} else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
else
target_fb = crtc->primary->fb;
}

/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
obj = amdgpu_fb->obj;
obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@ -1978,8 +1974,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@ -2414,11 +2409,9 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;

amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@ -1754,7 +1754,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@ -1773,18 +1772,15 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}

if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
if (atomic)
target_fb = fb;
} else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
else
target_fb = crtc->primary->fb;
}

/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
obj = amdgpu_fb->obj;
obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@ -1955,8 +1951,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@ -2430,11 +2425,9 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;

amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@ -168,11 +168,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;

amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@ -329,7 +327,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
return 0;
}

static int dce_virtual_mode_valid(struct drm_connector *connector,
static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
@ -462,8 +460,9 @@ static int dce_virtual_hw_init(void *handle)
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_VEGAM:
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
new file (112 lines): drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@ -0,0 +1,112 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "df_v1_7.h"

#include "df/df_1_7_default.h"
#include "df/df_1_7_offset.h"
#include "df/df_1_7_sh_mask.h"

static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

static void df_v1_7_init (struct amdgpu_device *adev)
{
}

static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
u32 tmp;

if (enable) {
tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
} else
WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
mmFabricConfigAccessControl_DEFAULT);
}

static u32 df_v1_7_get_fb_channel_number(struct amdgpu_device *adev)
{
u32 tmp;

tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

return tmp;
}

static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;

fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);

return df_v1_7_channel_number[fb_channel_number];
}

static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
u32 tmp;

/* Put DF on broadcast mode */
adev->df_funcs->enable_broadcast_mode(adev, true);

if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
tmp |= DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY;
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
} else {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
tmp |= DF_V1_7_MGCG_DISABLE;
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
}

/* Exit broadcast mode */
adev->df_funcs->enable_broadcast_mode(adev, false);
}

static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
u32 *flags)
{
u32 tmp;

/* AMD_CG_SUPPORT_DF_MGCG */
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
if (tmp & DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY)
*flags |= AMD_CG_SUPPORT_DF_MGCG;
}

const struct amdgpu_df_funcs df_v1_7_funcs = {
.init = df_v1_7_init,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
.get_fb_channel_number = df_v1_7_get_fb_channel_number,
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
.update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
.get_clockgating_state = df_v1_7_get_clockgating_state,
};
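
df_v1_7_funcs is consumed through the adev->df_funcs pointer that the file itself already dereferences; the hookup happens in the SoC init code, which is outside this excerpt. Roughly (a sketch, not quoted from the patch):

/* during early init of a DF 1.7 (vega10-class) part */
adev->df_funcs = &df_v1_7_funcs;
adev->df_funcs->init(adev);
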
@ -1,5 +1,5 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -20,33 +20,21 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef PP_SOC15_H
#define PP_SOC15_H

#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#ifndef __DF_V1_7_H__
#define __DF_V1_7_H__

inline static uint32_t soc15_get_register_offset(
uint32_t hw_id,
uint32_t inst,
uint32_t segment,
uint32_t offset)
#include "soc15_common.h"
enum DF_V1_7_MGCG
{
uint32_t reg = 0;
DF_V1_7_MGCG_DISABLE = 0,
DF_V1_7_MGCG_ENABLE_00_CYCLE_DELAY =1,
DF_V1_7_MGCG_ENABLE_01_CYCLE_DELAY =2,
DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY =13,
DF_V1_7_MGCG_ENABLE_31_CYCLE_DELAY =14,
DF_V1_7_MGCG_ENABLE_63_CYCLE_DELAY =15
};

if (hw_id == THM_HWID)
reg = THM_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == NBIF_HWID)
reg = NBIF_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == MP1_HWID)
reg = MP1_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == DF_HWID)
reg = DF_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == GC_HWID)
reg = GC_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == SMUIO_HWID)
reg = SMUIO_BASE.instance[inst].segment[segment] + offset;
return reg;
}
extern const struct amdgpu_df_funcs df_v1_7_funcs;

#endif
@ -125,18 +125,6 @@ MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
@ -149,6 +137,18 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
@ -161,6 +161,13 @@ MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
MODULE_FIRMWARE("amdgpu/vegam_me.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@ -292,6 +299,37 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_vegam_a11[] =
{
mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 vegam_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris11_a11[] =
{
mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
@ -712,6 +750,14 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
tonga_golden_common_all,
ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
golden_settings_vegam_a11,
ARRAY_SIZE(golden_settings_vegam_a11));
amdgpu_device_program_register_sequence(adev,
vegam_golden_common_all,
ARRAY_SIZE(vegam_golden_common_all));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_device_program_register_sequence(adev,
@ -918,17 +964,20 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
chip_name = "fiji";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
case CHIP_STONEY:
chip_name = "stoney";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
case CHIP_STONEY:
chip_name = "stoney";
case CHIP_VEGAM:
chip_name = "vegam";
break;
default:
BUG();
@ -1770,6 +1819,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
@ -1957,12 +2007,13 @@ static int gfx_v8_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_TONGA:
case CHIP_CARRIZO:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_POLARIS10:
case CHIP_CARRIZO:
case CHIP_VEGAM:
adev->gfx.mec.num_mec = 2;
break;
case CHIP_TOPAZ:
@ -2323,6 +2374,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)

break;
case CHIP_FIJI:
case CHIP_VEGAM:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@ -3504,6 +3556,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_VEGAM:
*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
RB_XSEL2(1) | PKR_MAP(2) |
PKR_XSEL(1) | PKR_YSEL(1) |
@ -4071,7 +4124,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
gfx_v8_0_init_power_gating(adev);
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
} else if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
@ -4146,7 +4200,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS12) {
adev->asic_type == CHIP_POLARIS12 ||
adev->asic_type == CHIP_VEGAM) {
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
tmp &= ~0x3;
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@ -5498,7 +5553,8 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
bool enable)
{
if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12))
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
amdgpu_device_ip_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_SMC,
@ -5588,6 +5644,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else
@ -6154,6 +6211,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
break;
default:
@ -41,7 +41,6 @@
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

#define mmPWR_MISC_CNTL_STATUS 0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
@ -185,6 +184,30 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
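
The two tables express each RLC_SRM_INDEX_CNTL_ADDR/DATA register as a distance from instance 0, so a loop can program all eight pairs uniformly. Expected usage, as a sketch consistent with the save/restore setup later in this series (addr[] and data[] are placeholders):

for (i = 0; i < ARRAY_SIZE(GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS); i++) {
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) +
	       GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i], addr[i]);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) +
	       GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i], data[i]);
}
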

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@ -401,6 +424,27 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_1 *rlc_hdr;

rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
adev->gfx.rlc.reg_list_format_direct_reg_list_length =
le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
|
||||
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
|
||||
{
|
||||
const char *chip_name;
|
||||
@ -412,6 +456,8 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
|
||||
const struct rlc_firmware_header_v2_0 *rlc_hdr;
|
||||
unsigned int *tmp = NULL;
|
||||
unsigned int i = 0;
|
||||
uint16_t version_major;
|
||||
uint16_t version_minor;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
@ -468,6 +514,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
|
||||
goto out;
|
||||
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
|
||||
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
|
||||
|
||||
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
|
||||
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
|
||||
if (version_major == 2 && version_minor == 1)
|
||||
adev->gfx.rlc.is_rlc_v2_1 = true;
|
||||
|
||||
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
|
||||
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
|
||||
adev->gfx.rlc.save_and_restore_offset =
|
||||
@ -508,6 +560,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
|
||||
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
|
||||
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
|
||||
|
||||
if (adev->gfx.rlc.is_rlc_v2_1)
|
||||
gfx_v9_0_init_rlc_ext_microcode(adev);
|
||||
|
||||
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
@ -566,6 +621,26 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

if (adev->gfx.rlc.is_rlc_v2_1) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
}

info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
info->fw = adev->gfx.mec_fw;
@ -1600,6 +1675,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)

gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);

/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@ -1616,7 +1692,10 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = adev->gmc.shared_aperture_start >> 48;
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
(adev->gmc.shared_aperture_start >> 48));
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
}
}
@ -1708,55 +1787,42 @@ static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.clear_state_size);
}

static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
int indirect_offset,
int list_size,
int *unique_indirect_regs,
int *unique_indirect_reg_count,
int max_indirect_reg_count,
int *indirect_start_offsets,
int *indirect_start_offsets_count,
int max_indirect_start_offsets_count)
int *indirect_start_offsets_count)
{
int idx;
bool new_entry = true;

for (; indirect_offset < list_size; indirect_offset++) {

if (new_entry) {
new_entry = false;
indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
}

if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
new_entry = true;
continue;
}

while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
indirect_offset += 2;

/* look for the matching indice */
for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
if (unique_indirect_regs[idx] ==
register_list_format[indirect_offset])
register_list_format[indirect_offset] ||
!unique_indirect_regs[idx])
break;
}

if (idx >= *unique_indirect_reg_count) {
unique_indirect_regs[*unique_indirect_reg_count] =
register_list_format[indirect_offset];
idx = *unique_indirect_reg_count;
*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
}
BUG_ON(idx >= *unique_indirect_reg_count);

register_list_format[indirect_offset] = idx;
if (!unique_indirect_regs[idx])
unique_indirect_regs[idx] = register_list_format[indirect_offset];

indirect_offset++;
}
}
}
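For context, the parser above walks the RLC register-list-format blob: past the direct-register prefix, each indirect entry is a run of three-word groups (two data words followed by the indirect register offset) terminated by a 0xFFFFFFFF sentinel, and every distinct indirect register gets folded into the small unique_indirect_regs table. A standalone sketch of that walk over a hypothetical toy list (values invented for illustration, not real RLC firmware contents):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy list: two entries, each a single three-word group plus the
     * 0xFFFFFFFF sentinel the kernel parser keys on. */
    static uint32_t toy_list[] = {
            0x10, 0x1, 0xC040,      /* entry 0: two data words, then indirect reg */
            0xFFFFFFFF,
            0x20, 0x2, 0xC040,      /* entry 1 reuses indirect reg 0xC040 */
            0xFFFFFFFF,
    };

    int main(void)
    {
            unsigned i = 0, n = sizeof(toy_list) / sizeof(toy_list[0]);

            while (i < n) {
                    printf("entry starts at offset %u\n", i);
                    while (toy_list[i] != 0xFFFFFFFF) {
                            i += 2; /* skip the two data words */
                            printf("  indirect reg 0x%X\n", (unsigned)toy_list[i]);
                            i++;
                    }
                    i++;    /* skip sentinel */
            }
            return 0;
    }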

static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
{
int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
int unique_indirect_reg_count = 0;
@ -1765,7 +1831,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
int indirect_start_offsets_count = 0;

int list_size = 0;
int i = 0;
int i = 0, j = 0;
u32 tmp = 0;

u32 *register_list_format =
@ -1776,15 +1842,14 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes);

/* setup unique_indirect_regs array and indirect_start_offsets array */
gfx_v9_0_parse_ind_reg_list(register_list_format,
GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
gfx_v9_1_parse_ind_reg_list(register_list_format,
adev->gfx.rlc.reg_list_format_direct_reg_list_length,
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indirect_regs,
&unique_indirect_reg_count,
ARRAY_SIZE(unique_indirect_regs),
indirect_start_offsets,
&indirect_start_offsets_count,
ARRAY_SIZE(indirect_start_offsets));
&indirect_start_offsets_count);

/* enable auto inc in case it is disabled */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@ -1798,19 +1863,37 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
adev->gfx.rlc.register_restore[i]);

/* load direct register */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
adev->gfx.rlc.register_restore[i]);

/* load indirect register */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.reg_list_format_start);
for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)

/* direct register portion */
for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
register_list_format[i]);

/* indirect register portion */
while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
if (register_list_format[i] == 0xFFFFFFFF) {
WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
continue;
}

WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);

for (j = 0; j < unique_indirect_reg_count; j++) {
if (register_list_format[i] == unique_indirect_regs[j]) {
WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
break;
}
}

BUG_ON(j >= unique_indirect_reg_count);

i++;
}

/* set save/restore list size */
list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
list_size = list_size >> 1;
@ -1827,11 +1910,16 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)

/* load unique indirect regs*/
for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
if (unique_indirect_regs[i] != 0) {
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
+ GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
unique_indirect_regs[i] & 0x3FFFF);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,

WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
+ GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
unique_indirect_regs[i] >> 20);
}
}

kfree(register_list_format);
return 0;
@ -2010,6 +2098,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad

static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
if (!adev->gfx.rlc.is_rlc_v2_1)
return;

if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG |
@ -2017,27 +2108,12 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v9_0_init_csb(adev);
gfx_v9_0_init_rlc_save_restore_list(adev);
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);

if (adev->asic_type == CHIP_RAVEN) {
WREG32(mmRLC_JUMP_TABLE_RESTORE,
adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);

if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
} else {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
}

if (adev->pg_flags & AMD_PG_SUPPORT_CP)
gfx_v9_0_enable_cp_power_gating(adev, true);
else
gfx_v9_0_enable_cp_power_gating(adev, false);
}
}
}

@ -3061,6 +3137,9 @@ static int gfx_v9_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;

amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);

amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

@ -3279,6 +3358,11 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;

r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_GATE);
if (r)
return r;

return 0;
}

@ -3339,8 +3423,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
/* TODO: double check if we need to perform under safe mdoe */
/* gfx_v9_0_enter_rlc_safe_mode(adev); */
gfx_v9_0_enter_rlc_safe_mode(adev);

if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@ -3351,7 +3434,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}

/* gfx_v9_0_exit_rlc_safe_mode(adev); */
gfx_v9_0_exit_rlc_safe_mode(adev);
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@ -3742,7 +3825,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}

amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
@ -3774,13 +3857,16 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

/* RELEASE_MEM - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
EOP_TC_NC_ACTION_EN) :
(EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EOP_TC_WB_ACTION_EN |
EOP_TC_MD_ACTION_EN |
EOP_TC_MD_ACTION_EN)) |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@ -4137,6 +4223,20 @@ static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
ref, mask, 0x20);
else
amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
ref, mask);
}
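The new hook gives rings a fused write-then-wait primitive: under SR-IOV both steps go out as a single WAIT_REG_MEM packet in its write-then-wait operation mode (the `1` passed to gfx_v9_0_wait_reg_mem above), so a world switch cannot land between the register write and the poll; on bare metal the generic helper simply chains the two existing callbacks. A sketch of what that fallback amounts to, assuming the emit_wreg/emit_reg_wait semantics used throughout this series:

    /* Bare-metal fallback, roughly what
     * amdgpu_ring_emit_reg_write_reg_wait_helper() does: write ref to
     * reg0, then poll reg1 until (reg1 & mask) == ref. */
    static void reg_write_reg_wait_fallback(struct amdgpu_ring *ring,
                                            uint32_t reg0, uint32_t reg1,
                                            uint32_t ref, uint32_t mask)
    {
            amdgpu_ring_emit_wreg(ring, reg0, ref);
            amdgpu_ring_emit_reg_wait(ring, reg1, ref, mask);
    }

The gmc_v9_0_emit_flush_gpu_tlb() change later in this diff is the motivating user: the TLB-invalidate request write and the ack wait collapse into one call.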

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@ -4458,6 +4558,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_tmz = gfx_v9_0_ring_emit_tmz,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@ -4492,6 +4593,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.set_priority = gfx_v9_0_ring_set_priority_compute,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@ -4522,6 +4624,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_rreg = gfx_v9_0_ring_emit_rreg,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)

@ -819,12 +819,33 @@ static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

amdgpu_bo_late_init(adev);

if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}

static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
unsigned size;

if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
/* return 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
return 0;
return size;
}
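The viewport math is just the active scanout footprint at 32 bpp, height x width x 4 bytes. A worked example with an assumed 1920x1080 pre-OS mode (the real values come from whatever the vBIOS programmed into VIEWPORT_SIZE):

    #include <stdio.h>

    int main(void)
    {
            unsigned width = 1920, height = 1080;   /* assumed pre-OS mode */
            unsigned size = height * width * 4;     /* 32 bpp scanout */

            /* prints 8294400 bytes (~7.9 MiB) reserved for the takeover */
            printf("pre-OS framebuffer: %u bytes\n", size);
            return 0;
    }

Reserving that region during init is what lets the driver inherit the console image instead of flashing to black during the console-to-driver handoff.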

static int gmc_v6_0_sw_init(void *handle)
{
int r;
@ -851,8 +872,6 @@ static int gmc_v6_0_sw_init(void *handle)

adev->gmc.mc_mask = 0xffffffffffULL;

adev->gmc.stolen_size = 256 * 1024;

adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
@ -878,6 +897,8 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;

adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);

r = amdgpu_bo_init(adev);
if (r)
return r;

@ -958,12 +958,33 @@ static int gmc_v7_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

amdgpu_bo_late_init(adev);

if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}

static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
unsigned size;

if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
/* return 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
return 0;
return size;
}

static int gmc_v7_0_sw_init(void *handle)
{
int r;
@ -998,8 +1019,6 @@ static int gmc_v7_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

adev->gmc.stolen_size = 256 * 1024;

/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
@ -1030,6 +1049,8 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;

adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);

/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)

@ -138,6 +138,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris11_a11,
ARRAY_SIZE(golden_settings_polaris11_a11));
@ -231,6 +232,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_VEGAM:
return 0;
default: BUG();
}
@ -567,9 +569,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
/* set the gart size */
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
case CHIP_POLARIS11: /* all engines support GPUVM */
case CHIP_POLARIS10: /* all engines support GPUVM */
case CHIP_POLARIS11: /* all engines support GPUVM */
case CHIP_POLARIS12: /* all engines support GPUVM */
case CHIP_VEGAM: /* all engines support GPUVM */
default:
adev->gmc.gart_size = 256ULL << 20;
break;
@ -1049,12 +1052,33 @@ static int gmc_v8_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

amdgpu_bo_late_init(adev);

if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}

static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
unsigned size;

if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
/* return 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
return 0;
return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
@ -1068,7 +1092,8 @@ static int gmc_v8_0_sw_init(void *handle)
} else {
u32 tmp;

if (adev->asic_type == CHIP_FIJI)
if ((adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_VEGAM))
tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
else
tmp = RREG32(mmMC_SEQ_MISC0);
@ -1096,8 +1121,6 @@ static int gmc_v8_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

adev->gmc.stolen_size = 256 * 1024;

/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
@ -1128,6 +1151,8 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;

adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);

/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)

@ -43,19 +43,13 @@
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS 8
@ -385,11 +379,9 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
upper_32_bits(pd_addr));

amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);

/* wait for the invalidate to complete */
amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
1 << vmid, 1 << vmid);
amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);

return pd_addr;
}
@ -556,8 +548,7 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
adev->gmc.private_aperture_start =
adev->gmc.shared_aperture_end + 1;
adev->gmc.private_aperture_start = 0x1000000000000000ULL;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;

@ -659,6 +650,11 @@ static int gmc_v9_0_late_init(void *handle)
unsigned i;
int r;

/*
* TODO - Uncomment once GART corruption issue is fixed.
*/
/* amdgpu_bo_late_init(adev); */

for(i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
unsigned vmhub = ring->funcs->vmhub;
@ -714,7 +710,6 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
u32 tmp;
int chansize, numchan;
int r;

@ -727,39 +722,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
else
chansize = 128;

tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
switch (tmp) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 0;
break;
case 3:
numchan = 4;
break;
case 4:
numchan = 0;
break;
case 5:
numchan = 8;
break;
case 6:
numchan = 0;
break;
case 7:
numchan = 16;
break;
case 8:
numchan = 2;
break;
}
numchan = adev->df_funcs->get_hbm_channel_number(adev);
adev->gmc.vram_width = numchan * chansize;
}
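The open-coded DF register decode above is the last inline user; the channel count now comes from a per-generation data-fabric callback table (df_v1_7.o joins the build, and soc15.c wires up adev->df_funcs = &df_v1_7_funcs further down in this diff). A sketch of the shape of that interface, restricted to the three callbacks this patch set actually calls; the real struct in amdgpu.h carries additional hooks:

    /* Sketch only. df_v1_7 implements these against
     * DF_CS_AON0_DramBaseAddress0 and DF_PIE_AON0_DfGlobalClkGater,
     * much like the removed open-coded versions, so a later DF
     * generation can swap in its own table. */
    struct amdgpu_df_funcs {
            u32  (*get_hbm_channel_number)(struct amdgpu_device *adev);
            void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
                                                     bool enable);
            void (*get_clockgating_state)(struct amdgpu_device *adev,
                                          u32 *flags);
    };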

@ -826,6 +789,52 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
#if 0
u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
#endif
unsigned size;

/*
* TODO Remove once GART corruption is resolved
* Check related code in gmc_v9_0_sw_fini
* */
size = 9 * 1024 * 1024;

#if 0
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
} else {
u32 viewport;

switch (adev->asic_type) {
case CHIP_RAVEN:
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
4);
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
default:
viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
break;
}
}
/* return 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
return 0;

#endif
return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
int r;
@ -877,12 +886,6 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

/*
* It needs to reserve 8M stolen memory for vega10
* TODO: Figure out how to avoid that...
*/
adev->gmc.stolen_size = 8 * 1024 * 1024;

/* set DMA mask + need_dma32 flags.
* PCIE - can handle 44-bits.
* IGP - can handle 44-bits
@ -907,6 +910,8 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;

adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
@ -950,6 +955,18 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
gmc_v9_0_gart_fini(adev);

/*
* TODO:
* Currently there is a bug where some memory client outside
* of the driver writes to first 8M of VRAM on S3 resume,
* this overrides GART which by default gets placed in first 8M and
* causes VM_FAULTS once GTT is accessed.
* Keep the stolen memory reservation until the while this is not solved.
* Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
*/
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

amdgpu_bo_fini(adev);

return 0;

@ -2817,7 +2817,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}

if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@ -2974,7 +2974,7 @@ static int kv_dpm_late_init(void *handle)
/* powerdown unused blocks for now */
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!amdgpu_dpm)
if (!adev->pm.dpm_enabled)
return 0;

kv_dpm_powergate_acp(adev, true);

@ -260,8 +260,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);

flr_done:
if (locked)
if (locked) {
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
}

/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_lockup_timeout == 0)

@ -40,11 +40,20 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_INIT_GPCOM_RING = 0x00020000, /* initialize GPCOM ring */
GFX_CTRL_CMD_ID_DESTROY_RINGS = 0x00030000, /* destroy rings */
GFX_CTRL_CMD_ID_CAN_INIT_RINGS = 0x00040000, /* is it allowed to initialized the rings */
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */

GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};

/*-----------------------------------------------------------------------------
NOTE: All physical addresses used in this interface are actually
GPU Virtual Addresses.
*/

/* Control registers of the TEE Gfx interface. These are located in
* SRBM-to-PSP mailbox registers (total 8 registers).
*/
@ -55,8 +64,8 @@ struct psp_gfx_ctrl
volatile uint32_t rbi_rptr; /* +8 Read pointer (index) of RBI ring */
volatile uint32_t gpcom_wptr; /* +12 Write pointer (index) of GPCOM ring */
volatile uint32_t gpcom_rptr; /* +16 Read pointer (index) of GPCOM ring */
volatile uint32_t ring_addr_lo; /* +20 bits [31:0] of physical address of ring buffer */
volatile uint32_t ring_addr_hi; /* +24 bits [63:32] of physical address of ring buffer */
volatile uint32_t ring_addr_lo; /* +20 bits [31:0] of GPU Virtual of ring buffer (VMID=0)*/
volatile uint32_t ring_addr_hi; /* +24 bits [63:32] of GPU Virtual of ring buffer (VMID=0) */
volatile uint32_t ring_buf_size; /* +28 Ring buffer size (in bytes) */

};
@ -78,6 +87,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_ASD = 0x00000004, /* load ASD Driver */
GFX_CMD_ID_SETUP_TMR = 0x00000005, /* setup TMR region */
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */

};

@ -85,11 +96,11 @@ enum psp_gfx_cmd_id
/* Command to load Trusted Application binary into PSP OS. */
struct psp_gfx_cmd_load_ta
{
uint32_t app_phy_addr_lo; /* bits [31:0] of the physical address of the TA binary (must be 4 KB aligned) */
uint32_t app_phy_addr_hi; /* bits [63:32] of the physical address of the TA binary */
uint32_t app_phy_addr_lo; /* bits [31:0] of the GPU Virtual address of the TA binary (must be 4 KB aligned) */
uint32_t app_phy_addr_hi; /* bits [63:32] of the GPU Virtual address of the TA binary */
uint32_t app_len; /* length of the TA binary in bytes */
uint32_t cmd_buf_phy_addr_lo; /* bits [31:0] of the physical address of CMD buffer (must be 4 KB aligned) */
uint32_t cmd_buf_phy_addr_hi; /* bits [63:32] of the physical address of CMD buffer */
uint32_t cmd_buf_phy_addr_lo; /* bits [31:0] of the GPU Virtual address of CMD buffer (must be 4 KB aligned) */
uint32_t cmd_buf_phy_addr_hi; /* bits [63:32] of the GPU Virtual address of CMD buffer */
uint32_t cmd_buf_len; /* length of the CMD buffer in bytes; must be multiple of 4 KB */

/* Note: CmdBufLen can be set to 0. In this case no persistent CMD buffer is provided
@ -111,8 +122,8 @@ struct psp_gfx_cmd_unload_ta
*/
struct psp_gfx_buf_desc
{
uint32_t buf_phy_addr_lo; /* bits [31:0] of physical address of the buffer (must be 4 KB aligned) */
uint32_t buf_phy_addr_hi; /* bits [63:32] of physical address of the buffer */
uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of the buffer (must be 4 KB aligned) */
uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of the buffer */
uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB and no bigger than 64 MB) */

};
@ -145,8 +156,8 @@ struct psp_gfx_cmd_invoke_cmd
/* Command to setup TMR region. */
struct psp_gfx_cmd_setup_tmr
{
uint32_t buf_phy_addr_lo; /* bits [31:0] of physical address of TMR buffer (must be 4 KB aligned) */
uint32_t buf_phy_addr_hi; /* bits [63:32] of physical address of TMR buffer */
uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */
uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */
uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */

};
@ -174,18 +185,32 @@ enum psp_gfx_fw_type
GFX_FW_TYPE_ISP = 16,
GFX_FW_TYPE_ACP = 17,
GFX_FW_TYPE_SMU = 18,
GFX_FW_TYPE_MMSCH = 19,
GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20,
GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21,
GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22,
GFX_FW_TYPE_MAX = 23
};

/* Command to load HW IP FW. */
struct psp_gfx_cmd_load_ip_fw
{
uint32_t fw_phy_addr_lo; /* bits [31:0] of physical address of FW location (must be 4 KB aligned) */
uint32_t fw_phy_addr_hi; /* bits [63:32] of physical address of FW location */
uint32_t fw_phy_addr_lo; /* bits [31:0] of GPU Virtual address of FW location (must be 4 KB aligned) */
uint32_t fw_phy_addr_hi; /* bits [63:32] of GPU Virtual address of FW location */
uint32_t fw_size; /* FW buffer size in bytes */
enum psp_gfx_fw_type fw_type; /* FW type */

};

/* Command to save/restore HW IP FW. */
struct psp_gfx_cmd_save_restore_ip_fw
{
uint32_t save_fw; /* if set, command is used for saving fw otherwise for resetoring*/
uint32_t save_restore_addr_lo; /* bits [31:0] of FB address of GART memory used as save/restore buffer (must be 4 KB aligned) */
uint32_t save_restore_addr_hi; /* bits [63:32] of FB address of GART memory used as save/restore buffer */
uint32_t buf_size; /* Size of the save/restore buffer in bytes */
enum psp_gfx_fw_type fw_type; /* FW type */
};

/* All GFX ring buffer commands. */
union psp_gfx_commands
@ -195,7 +220,7 @@ union psp_gfx_commands
struct psp_gfx_cmd_invoke_cmd cmd_invoke_cmd;
struct psp_gfx_cmd_setup_tmr cmd_setup_tmr;
struct psp_gfx_cmd_load_ip_fw cmd_load_ip_fw;

struct psp_gfx_cmd_save_restore_ip_fw cmd_save_restore_ip_fw;
};

@ -226,8 +251,8 @@ struct psp_gfx_cmd_resp

/* These fields are used for RBI only. They are all 0 in GPCOM commands
*/
uint32_t resp_buf_addr_lo; /* +12 bits [31:0] of physical address of response buffer (must be 4 KB aligned) */
uint32_t resp_buf_addr_hi; /* +16 bits [63:32] of physical address of response buffer */
uint32_t resp_buf_addr_lo; /* +12 bits [31:0] of GPU Virtual address of response buffer (must be 4 KB aligned) */
uint32_t resp_buf_addr_hi; /* +16 bits [63:32] of GPU Virtual address of response buffer */
uint32_t resp_offset; /* +20 offset within response buffer */
uint32_t resp_buf_size; /* +24 total size of the response buffer in bytes */

@ -251,18 +276,18 @@ struct psp_gfx_cmd_resp
/* Structure of the Ring Buffer Frame */
struct psp_gfx_rb_frame
{
uint32_t cmd_buf_addr_lo; /* +0 bits [31:0] of physical address of command buffer (must be 4 KB aligned) */
uint32_t cmd_buf_addr_hi; /* +4 bits [63:32] of physical address of command buffer */
uint32_t cmd_buf_addr_lo; /* +0 bits [31:0] of GPU Virtual address of command buffer (must be 4 KB aligned) */
uint32_t cmd_buf_addr_hi; /* +4 bits [63:32] of GPU Virtual address of command buffer */
uint32_t cmd_buf_size; /* +8 command buffer size in bytes */
uint32_t fence_addr_lo; /* +12 bits [31:0] of physical address of Fence for this frame */
uint32_t fence_addr_hi; /* +16 bits [63:32] of physical address of Fence for this frame */
uint32_t fence_addr_lo; /* +12 bits [31:0] of GPU Virtual address of Fence for this frame */
uint32_t fence_addr_hi; /* +16 bits [63:32] of GPU Virtual address of Fence for this frame */
uint32_t fence_value; /* +20 Fence value */
uint32_t sid_lo; /* +24 bits [31:0] of SID value (used only for RBI frames) */
uint32_t sid_hi; /* +28 bits [63:32] of SID value (used only for RBI frames) */
uint8_t vmid; /* +32 VMID value used for mapping of all addresses for this frame */
uint8_t frame_type; /* +33 1: destory context frame, 0: all other frames; used only for RBI frames */
uint8_t reserved1[2]; /* +34 reserved, must be 0 */
uint32_t reserved2[7]; /* +40 reserved, must be 0 */
uint32_t reserved2[7]; /* +36 reserved, must be 0 */
/* total 64 bytes */
};
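The +40 to +36 comment fix is pure arithmetic: vmid sits at +32, frame_type at +33 and reserved1[2] at +34, so reserved2 begins at byte 36, and 36 + 7*4 = 64 matches the stated frame size. A compile-time check over an equivalent layout (a sketch, not part of the patch) pins that down:

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in with the same member order as psp_gfx_rb_frame;
     * natural alignment introduces no padding here. */
    struct rb_frame {
            uint32_t cmd_buf_addr_lo, cmd_buf_addr_hi, cmd_buf_size;
            uint32_t fence_addr_lo, fence_addr_hi, fence_value;
            uint32_t sid_lo, sid_hi;
            uint8_t  vmid, frame_type, reserved1[2];
            uint32_t reserved2[7];
    };

    _Static_assert(offsetof(struct rb_frame, reserved2) == 36,
                   "reserved2 starts at +36, as the corrected comment says");
    _Static_assert(sizeof(struct rb_frame) == 64, "total 64 bytes");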

@ -70,6 +70,15 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
case AMDGPU_UCODE_ID_RLC_G:
*type = GFX_FW_TYPE_RLC_G;
break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL;
break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
break;
case AMDGPU_UCODE_ID_SMC:
*type = GFX_FW_TYPE_SMU;
break;

@ -62,6 +62,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@ -209,6 +211,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris11_a11,
ARRAY_SIZE(golden_settings_polaris11_a11));
@ -275,15 +278,18 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
chip_name = "fiji";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
case CHIP_VEGAM:
chip_name = "vegam";
break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;

@ -360,6 +360,31 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,

}

static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
int mem_space, int hdp,
uint32_t addr0, uint32_t addr1,
uint32_t ref, uint32_t mask,
uint32_t inv)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
if (mem_space) {
/* memory */
amdgpu_ring_write(ring, addr0);
amdgpu_ring_write(ring, addr1);
} else {
/* registers */
amdgpu_ring_write(ring, addr0 << 2);
amdgpu_ring_write(ring, addr1 << 2);
}
amdgpu_ring_write(ring, ref); /* reference */
amdgpu_ring_write(ring, mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}

/**
* sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
@ -378,15 +403,10 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
else
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;

amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
sdma_v4_0_wait_reg_mem(ring, 0, 1,
adev->nbio_funcs->get_hdp_flush_done_offset(adev),
adev->nbio_funcs->get_hdp_flush_req_offset(adev),
ref_and_mask, ref_and_mask, 10);
}

/**
@ -1114,16 +1134,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
uint64_t addr = ring->fence_drv.gpu_addr;

/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
sdma_v4_0_wait_reg_mem(ring, 1, 0,
addr & 0xfffffffc,
upper_32_bits(addr) & 0xffffffff,
seq, 0xffffffff, 4);
}

@ -1154,15 +1168,7 @@ static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val); /* reference */
amdgpu_ring_write(ring, mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}

static int sdma_v4_0_early_init(void *handle)
@ -1605,6 +1611,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.pad_ib = sdma_v4_0_ring_pad_ib,
.emit_wreg = sdma_v4_0_ring_emit_wreg,
.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)

@ -1252,6 +1252,12 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
}
}

static bool si_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we support soft reset */
return true;
}

static int si_get_pcie_lanes(struct amdgpu_device *adev)
{
u32 link_width_cntl;
@ -1332,6 +1338,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_config_memsize = &si_get_config_memsize,
.flush_hdp = &si_flush_hdp,
.invalidate_hdp = &si_invalidate_hdp,
.need_full_reset = &si_need_full_reset,
};

static uint32_t si_get_rev_id(struct amdgpu_device *adev)

@ -7580,7 +7580,7 @@ static int si_dpm_late_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!amdgpu_dpm)
if (!adev->pm.dpm_enabled)
return 0;

ret = si_set_temperature_range(adev);

@ -52,6 +52,7 @@
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@ -60,33 +61,6 @@
#include "dce_virtual.h"
#include "mxgpu_ai.h"

#define mmFabricConfigAccessControl 0x0410
#define mmFabricConfigAccessControl_BASE_IDX 0
#define mmFabricConfigAccessControl_DEFAULT 0x00000000
//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L

#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL

enum {
DF_MGCG_DISABLE = 0,
DF_MGCG_ENABLE_00_CYCLE_DELAY =1,
DF_MGCG_ENABLE_01_CYCLE_DELAY =2,
DF_MGCG_ENABLE_15_CYCLE_DELAY =13,
DF_MGCG_ENABLE_31_CYCLE_DELAY =14,
DF_MGCG_ENABLE_63_CYCLE_DELAY =15
};

#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
@ -313,6 +287,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@ -341,6 +316,8 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
} else {
if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
return adev->gfx.config.gb_addr_config;
else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
return adev->gfx.config.db_debug2;
return RREG32(reg_offset);
}
}
@ -521,6 +498,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
else
adev->nbio_funcs = &nbio_v6_1_funcs;

adev->df_funcs = &df_v1_7_funcs;
adev->nbio_funcs->detect_hw_virt(adev);

if (amdgpu_sriov_vf(adev))
@ -593,6 +571,12 @@ static void soc15_invalidate_hdp(struct amdgpu_device *adev,
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we implement soft reset */
return true;
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@ -606,6 +590,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.get_config_memsize = &soc15_get_config_memsize,
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
};

static int soc15_common_early_init(void *handle)
@ -697,6 +682,11 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS;
adev->pg_flags = AMD_PG_SUPPORT_SDMA;

if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;

adev->external_rev_id = 0x1;
break;
default:
@ -871,32 +861,6 @@ static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *ade
WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data;

/* Put DF on broadcast mode */
data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);

if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
} else {
data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
data |= DF_MGCG_DISABLE;
WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
}

WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
mmFabricConfigAccessControl_DEFAULT);
}

static int soc15_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@ -920,7 +884,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_rom_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_df_medium_grain_clock_gating(adev,
adev->df_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_RAVEN:
@ -973,10 +937,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;

/* AMD_CG_SUPPORT_DF_MGCG */
data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
*flags |= AMD_CG_SUPPORT_DF_MGCG;
adev->df_funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,

@ -159,6 +159,7 @@
#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
#define EOP_TCL1_ACTION_EN (1 << 16)
#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
#define EOP_TC_NC_ACTION_EN (1 << 19)
#define EOP_TC_MD_ACTION_EN (1 << 21) /* L2 metadata */

#define DATA_SEL(x) ((x) << 29)

@ -688,7 +688,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,

if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
@ -699,7 +699,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
return 0;
} else {
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
if (RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |

@ -62,7 +62,7 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
return ((adev->asic_type >= CHIP_POLARIS10) &&
(adev->asic_type <= CHIP_POLARIS12) &&
(adev->asic_type <= CHIP_VEGAM) &&
(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
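That one-liner leans entirely on enum ordering: the range check admits VEGAM only because CHIP_VEGAM is the enumerator right after CHIP_POLARIS12 in amd_asic_type, so widening the upper bound pulls in exactly one more ASIC. A toy rendering of the pattern (enumerators illustrative, not the kernel's values):

    #include <stdbool.h>

    /* Illustrative stand-in for the amd_asic_type ordering the
     * check depends on. */
    enum toy_asic { TOY_POLARIS10, TOY_POLARIS11, TOY_POLARIS12, TOY_VEGAM };

    static bool toy_enc_support(enum toy_asic t)
    {
            /* was "<= TOY_POLARIS12"; the new bound adds only VEGAM */
            return t >= TOY_POLARIS10 && t <= TOY_VEGAM;
    }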
|
||||
@ -429,7 +429,7 @@ static int uvd_v6_0_sw_init(void *handle)
|
||||
ring = &adev->uvd.ring_enc[0];
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
rq, NULL);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
|
||||
return r;
|
||||
@ -963,6 +963,16 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*/
|
||||
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
|
||||
{
|
||||
/* The firmware doesn't seem to like touching registers at this point. */
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_ring_test_ring - register write test
|
||||
*
|
||||
@ -1528,12 +1538,13 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
|
||||
.set_wptr = uvd_v6_0_ring_set_wptr,
|
||||
.parse_cs = amdgpu_uvd_ring_parse_cs,
|
||||
.emit_frame_size =
|
||||
6 + 6 + /* hdp flush / invalidate */
|
||||
6 + /* hdp invalidate */
|
||||
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
|
||||
14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
|
||||
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
|
||||
.emit_ib = uvd_v6_0_ring_emit_ib,
|
||||
.emit_fence = uvd_v6_0_ring_emit_fence,
|
||||
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
|
||||
.test_ring = uvd_v6_0_ring_test_ring,
|
||||
.test_ib = amdgpu_uvd_ring_test_ib,
|
||||
.insert_nop = amdgpu_ring_insert_nop,
|
||||
@ -1552,7 +1563,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
|
||||
.get_wptr = uvd_v6_0_ring_get_wptr,
|
||||
.set_wptr = uvd_v6_0_ring_set_wptr,
|
||||
.emit_frame_size =
|
||||
6 + 6 + /* hdp flush / invalidate */
|
||||
6 + /* hdp invalidate */
|
||||
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
|
||||
VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
|
||||
14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
|
||||
@ -1561,6 +1572,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
|
||||
.emit_fence = uvd_v6_0_ring_emit_fence,
|
||||
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
|
||||
.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
|
||||
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
|
||||
.test_ring = uvd_v6_0_ring_test_ring,
|
||||
.test_ib = amdgpu_uvd_ring_test_ib,
|
||||
.insert_nop = amdgpu_ring_insert_nop,
|
||||
|
@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle)
|
||||
ring = &adev->uvd.ring_enc[0];
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
rq, NULL);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
|
||||
return r;
|
||||
@ -1135,6 +1135,16 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*/
|
||||
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
|
||||
{
|
||||
/* The firmware doesn't seem to like touching registers at this point. */
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v7_0_ring_test_ring - register write test
|
||||
*
|
||||
@ -1654,7 +1664,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
|
||||
.get_wptr = uvd_v7_0_ring_get_wptr,
|
||||
.set_wptr = uvd_v7_0_ring_set_wptr,
|
||||
.emit_frame_size =
|
||||
6 + 6 + /* hdp flush / invalidate */
|
||||
6 + /* hdp invalidate */
|
||||
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
|
||||
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
|
||||
8 + /* uvd_v7_0_ring_emit_vm_flush */
|
||||
@ -1663,6 +1673,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
|
||||
.emit_ib = uvd_v7_0_ring_emit_ib,
|
||||
.emit_fence = uvd_v7_0_ring_emit_fence,
|
||||
.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
|
||||
.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
|
||||
.test_ring = uvd_v7_0_ring_test_ring,
|
||||
.test_ib = amdgpu_uvd_ring_test_ib,
|
||||
.insert_nop = uvd_v7_0_ring_insert_nop,
|
||||
@ -1671,6 +1682,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
|
||||
.end_use = amdgpu_uvd_ring_end_use,
|
||||
.emit_wreg = uvd_v7_0_ring_emit_wreg,
|
||||
.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
};
|
||||
|
||||
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
|
||||
@ -1702,6 +1714,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
|
||||
.end_use = amdgpu_uvd_ring_end_use,
|
||||
.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
|
||||
.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
};
|
||||
|
||||
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
|
||||
|
@ -388,7 +388,8 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
|
||||
default:
|
||||
if ((adev->asic_type == CHIP_POLARIS10) ||
|
||||
(adev->asic_type == CHIP_POLARIS11) ||
|
||||
(adev->asic_type == CHIP_POLARIS12))
|
||||
(adev->asic_type == CHIP_POLARIS12) ||
|
||||
(adev->asic_type == CHIP_VEGAM))
|
||||
return AMDGPU_VCE_HARVEST_VCE1;
|
||||
|
||||
return 0;
|
||||
@ -467,7 +468,7 @@ static int vce_v3_0_hw_init(void *handle)
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
vce_v3_0_override_vce_clock_gating(adev, true);
|
||||
if (!(adev->flags & AMD_IS_APU))
|
||||
|
||||
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
|
||||
|
||||
for (i = 0; i < adev->vce.num_rings; i++)
|
||||
|
@ -1081,6 +1081,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.end_use = amdgpu_vce_ring_end_use,
.emit_wreg = vce_v4_0_emit_wreg,
.emit_reg_wait = vce_v4_0_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
@ -1109,6 +1109,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {

@ -1139,6 +1140,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@ -305,9 +305,10 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
stoney_mgcg_cgcg_init,
ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
default:
break;
}

@ -728,26 +729,51 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
return r;
tmp = RREG32_SMC(cntl_reg);
if (adev->flags & AMD_IS_APU)
tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
else
tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
tmp |= dividers.post_divider;
WREG32_SMC(cntl_reg, tmp);
for (i = 0; i < 100; i++) {
if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
tmp = RREG32_SMC(status_reg);
if (adev->flags & AMD_IS_APU) {
if (tmp & 0x10000)
break;
} else {
if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
break;
}
mdelay(10);
}
if (i == 100)
return -ETIMEDOUT;
return 0;
}
#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS 0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS 0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS 0xD822014C
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
int r;
if (adev->flags & AMD_IS_APU) {
r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
if (r)
return r;
r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
if (r)
return r;
} else {
r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
if (r)
return r;

@ -755,6 +781,7 @@ static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
if (r)
return r;
}
return 0;
}

@ -764,6 +791,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
int r, i;
struct atom_clock_dividers dividers;
u32 tmp;
u32 reg_ctrl;
u32 reg_status;
u32 status_mask;
u32 reg_mask;
if (adev->flags & AMD_IS_APU) {
reg_ctrl = ixGNB_CLK3_DFS_CNTL;
reg_status = ixGNB_CLK3_STATUS;
status_mask = 0x00010000;
reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
} else {
reg_ctrl = ixCG_ECLK_CNTL;
reg_status = ixCG_ECLK_STATUS;
status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
}
r = amdgpu_atombios_get_clock_dividers(adev,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,

@ -772,24 +815,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return r;
for (i = 0; i < 100; i++) {
if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
if (RREG32_SMC(reg_status) & status_mask)
break;
mdelay(10);
}
if (i == 100)
return -ETIMEDOUT;
tmp = RREG32_SMC(ixCG_ECLK_CNTL);
tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
tmp = RREG32_SMC(reg_ctrl);
tmp &= ~reg_mask;
tmp |= dividers.post_divider;
WREG32_SMC(ixCG_ECLK_CNTL, tmp);
WREG32_SMC(reg_ctrl, tmp);
for (i = 0; i < 100; i++) {
if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
if (RREG32_SMC(reg_status) & status_mask)
break;
mdelay(10);
}
if (i == 100)
return -ETIMEDOUT;
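The reworked vi_set_uvd_clock() and vi_set_vce_clocks() paths above repeat one idiom: program a divider, then poll a status register under a bounded mdelay() loop and fail with -ETIMEDOUT. Factored out, the pattern looks like this (poll_smc_status() is a hypothetical helper for illustration, not part of the patch):

	static int poll_smc_status(struct amdgpu_device *adev,
				   u32 status_reg, u32 status_mask)
	{
		int i;

		/* up to 100 polls, 10 ms apart, i.e. roughly a 1 second budget */
		for (i = 0; i < 100; i++) {
			if (RREG32_SMC(status_reg) & status_mask)
				return 0;
			mdelay(10);
		}
		return -ETIMEDOUT;
	}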
@ -876,6 +920,27 @@ static void vi_invalidate_hdp(struct amdgpu_device *adev,
}
}
static bool vi_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
/* CZ has hang issues with full reset at the moment */
return false;
case CHIP_FIJI:
case CHIP_TONGA:
/* XXX: soft reset should work on fiji and tonga */
return true;
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
default:
/* change this when we support soft reset */
return true;
}
}
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,

@ -889,6 +954,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_config_memsize = &vi_get_config_memsize,
.flush_hdp = &vi_flush_hdp,
.invalidate_hdp = &vi_invalidate_hdp,
.need_full_reset = &vi_need_full_reset,
};
#define CZ_REV_BRISTOL(rev) \

@ -1031,6 +1097,30 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x64;
break;
case CHIP_VEGAM:
adev->cg_flags = 0;
/*AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_DRM_LS |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_VCE_MGCG;*/
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x6E;
break;
case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |

@ -1422,6 +1512,7 @@ static int vi_common_set_clockgating_state(void *handle,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
vi_common_set_clockgating_state_by_smu(adev, state);
default:
break;

@ -1551,9 +1642,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
@ -9,14 +9,6 @@ config DRM_AMD_DC
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
config DRM_AMD_DC_PRE_VEGA
bool "DC support for Polaris and older ASICs"
default y
help
Choose this option to enable the new DC support for older asics
by default. This includes Polaris, Carrizo, Tonga, Bonaire,
and Hawaii.
config DRM_AMD_DC_FBC
bool "AMD FBC - Enable Frame Buffer Compression"
depends on DRM_AMD_DC

@ -42,4 +34,10 @@ config DEBUG_KERNEL_DC
if you want to hit
kdgb_break in assert.
config DRM_AMD_DC_VEGAM
bool "VEGAM support"
depends on DRM_AMD_DC
help
Choose this option if you want to have
VEGAM support for display engine
endmenu
@ -433,11 +433,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
if (amdgpu_dc_log)
init_data.log_mask = DC_DEFAULT_LOG_MASK;
else
init_data.log_mask = DC_MIN_LOG_MASK;
/*
* TODO debug why this doesn't work on Raven
*/

@ -649,18 +644,6 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
ret = amdgpu_dm_display_resume(adev);
return ret;
}
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev->ddev;
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;

@ -671,10 +654,12 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
int ret = 0;
int ret;
int i;
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
/* program HPD filter */
dc_resume(dm->dc);

@ -688,8 +673,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
amdgpu_dm_irq_resume_early(adev);
/* Do detection*/
list_for_each_entry(connector,
&ddev->mode_config.connector_list, head) {
list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
/*

@ -711,7 +695,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
/* Force mode set in atomic comit */
for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
new_crtc_state->active_changed = true;
/*

@ -719,7 +703,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
* them here, since they were duplicated as part of the suspend
* procedure.
*/
for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream) {
WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);

@ -728,7 +712,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
}
for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
dm_new_plane_state = to_dm_plane_state(new_plane_state);
if (dm_new_plane_state->dc_state) {
WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);

@ -737,9 +721,9 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
}
ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
ret = drm_atomic_helper_resume(ddev, dm->cached_state);
adev->dm.cached_state = NULL;
dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);

@ -1529,6 +1513,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case CHIP_VEGAM:
#endif
case CHIP_VEGA10:
case CHIP_VEGA12:
if (dce110_register_irq_handlers(dm->adev)) {

@ -1549,7 +1536,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
break;
#endif
default:
DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
goto fail;
}

@ -1657,7 +1644,6 @@ static ssize_t s3_debug_store(struct device *device,
if (ret == 0) {
if (s3_state) {
dm_resume(adev);
amdgpu_dm_display_resume(adev);
drm_kms_helper_hotplug_event(adev->ddev);
} else
dm_suspend(adev);

@ -1722,6 +1708,9 @@ static int dm_early_init(void *handle)
adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_POLARIS10:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case CHIP_VEGAM:
#endif
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;

@ -1743,7 +1732,7 @@ static int dm_early_init(void *handle)
break;
#endif
default:
DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}

@ -1848,7 +1837,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags)
{
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
int r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {

@ -2017,7 +2006,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *amdgpu_fb =
to_amdgpu_framebuffer(plane_state->fb);
const struct drm_crtc *crtc = plane_state->crtc;
struct dc_transfer_func *input_tf;
int ret = 0;
if (!fill_rects_from_plane_state(plane_state, dc_plane_state))

@ -2031,13 +2019,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
if (ret)
return ret;
input_tf = dc_create_transfer_func();
if (input_tf == NULL)
return -ENOMEM;
dc_plane_state->in_transfer_func = input_tf;
/*
* Always set input transfer function, since plane state is refreshed
* every time.

@ -2206,7 +2187,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_connector *connector)
{
struct dc_crtc_timing *timing_out = &stream->timing;
struct dc_transfer_func *tf = dc_create_transfer_func();
memset(timing_out, 0, sizeof(struct dc_crtc_timing));

@ -2250,9 +2230,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
stream->output_color_space = get_output_color_space(timing_out);
tf->type = TF_TYPE_PREDEFINED;
tf->tf = TRANSFER_FUNCTION_SRGB;
stream->out_transfer_func = tf;
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
}
static void fill_audio_info(struct audio_info *audio_info,

@ -2488,6 +2467,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream);
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
return stream;
}

@ -2710,19 +2692,16 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
const struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = connector->dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
link->type != dc_connection_none) {
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev) {
link->type != dc_connection_none &&
dm->backlight_dev) {
backlight_device_unregister(dm->backlight_dev);
dm->backlight_dev = NULL;
}
}
#endif
drm_connector_unregister(connector);
drm_connector_cleanup(connector);

@ -2855,7 +2834,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
create_eml_sink(aconnector);
}
int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int result = MODE_ERROR;

@ -3058,8 +3037,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
}
afb = to_amdgpu_framebuffer(new_state->fb);
obj = afb->obj;
obj = new_state->fb->obj[0];
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, false);

@ -3067,12 +3045,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
return r;
if (plane->type != DRM_PLANE_TYPE_CURSOR)
domain = amdgpu_display_framebuffer_domains(adev);
domain = amdgpu_display_supported_domains(adev);
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
r = amdgpu_bo_pin(rbo, domain, &afb->address);
amdgpu_bo_unreserve(rbo);
if (unlikely(r != 0)) {

@ -3123,14 +3100,12 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
struct amdgpu_framebuffer *afb;
int r;
if (!old_state->fb)
return;
afb = to_amdgpu_framebuffer(old_state->fb);
rbo = gem_to_amdgpu_bo(afb->obj);
rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");

@ -3773,7 +3748,7 @@ static void remove_stream(struct amdgpu_device *adev,
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
struct dc_cursor_position *position)
{
struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int x, y;
int xorigin = 0, yorigin = 0;

@ -3905,7 +3880,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
int r, vpos, hpos;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
struct amdgpu_device *adev = crtc->dev->dev_private;
bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
struct dc_flip_addrs addr = { {0} };

@ -3986,6 +3961,96 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
/*
* TODO this whole function needs to go
*
* dc_surface_update is needlessly complex. See if we can just replace this
* with a dc_plane_state and follow the atomic model a bit more closely here.
*/
static bool commit_planes_to_stream(
struct dc *dc,
struct dc_plane_state **plane_states,
uint8_t new_plane_count,
struct dm_crtc_state *dm_new_crtc_state,
struct dm_crtc_state *dm_old_crtc_state,
struct dc_state *state)
{
/* no need to dynamically allocate this. it's pretty small */
struct dc_surface_update updates[MAX_SURFACES];
struct dc_flip_addrs *flip_addr;
struct dc_plane_info *plane_info;
struct dc_scaling_info *scaling_info;
int i;
struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
struct dc_stream_update *stream_update =
kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
if (!stream_update) {
BREAK_TO_DEBUGGER();
return false;
}
flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
GFP_KERNEL);
plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
GFP_KERNEL);
scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
GFP_KERNEL);
if (!flip_addr || !plane_info || !scaling_info) {
kfree(flip_addr);
kfree(plane_info);
kfree(scaling_info);
kfree(stream_update);
return false;
}
memset(updates, 0, sizeof(updates));
stream_update->src = dc_stream->src;
stream_update->dst = dc_stream->dst;
stream_update->out_transfer_func = dc_stream->out_transfer_func;
for (i = 0; i < new_plane_count; i++) {
updates[i].surface = plane_states[i];
updates[i].gamma =
(struct dc_gamma *)plane_states[i]->gamma_correction;
updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
flip_addr[i].address = plane_states[i]->address;
flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
plane_info[i].color_space = plane_states[i]->color_space;
plane_info[i].format = plane_states[i]->format;
plane_info[i].plane_size = plane_states[i]->plane_size;
plane_info[i].rotation = plane_states[i]->rotation;
plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
plane_info[i].stereo_format = plane_states[i]->stereo_format;
plane_info[i].tiling_info = plane_states[i]->tiling_info;
plane_info[i].visible = plane_states[i]->visible;
plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
plane_info[i].dcc = plane_states[i]->dcc;
scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
scaling_info[i].src_rect = plane_states[i]->src_rect;
scaling_info[i].dst_rect = plane_states[i]->dst_rect;
scaling_info[i].clip_rect = plane_states[i]->clip_rect;
updates[i].flip_addr = &flip_addr[i];
updates[i].plane_info = &plane_info[i];
updates[i].scaling_info = &scaling_info[i];
}
dc_commit_updates_for_stream(
dc,
updates,
new_plane_count,
dc_stream, stream_update, plane_states, state);
kfree(flip_addr);
kfree(plane_info);
kfree(scaling_info);
kfree(stream_update);
return true;
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,

@ -4001,6 +4066,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc_state *new_pcrtc_state =
drm_atomic_get_new_crtc_state(state, pcrtc);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
int planes_count = 0;
unsigned long flags;

@ -4037,7 +4104,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
if (!pflip_needed) {
if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
WARN_ON(!dm_new_plane_state->dc_state);
plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;

@ -4079,10 +4146,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
if (false == dc_commit_planes_to_stream(dm->dc,
if (false == commit_planes_to_stream(dm->dc,
plane_states_constructed,
planes_count,
dc_stream_attach,
acrtc_state,
dm_old_crtc_state,
dm_state->context))
dm_error("%s: Failed to attach plane!\n", __func__);
} else {

@ -4307,8 +4376,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct dc_stream_status *status = NULL;
if (acrtc)
if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
}
/* Skip any modesets/resets */
if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))

@ -4331,11 +4402,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
WARN_ON(!status->plane_count);
/*TODO How it works with MPO ?*/
if (!dc_commit_planes_to_stream(
if (!commit_planes_to_stream(
dm->dc,
status->plane_states,
status->plane_count,
dm_new_crtc_state->stream,
dm_new_crtc_state,
to_dm_crtc_state(old_crtc_state),
dm_state->context))
dm_error("%s: Failed to update stream scaling!\n", __func__);
}

@ -4755,7 +4827,8 @@ static int dm_update_planes_state(struct dc *dc,
/* Remove any changed/removed planes */
if (!enable) {
if (pflip_needed)
if (pflip_needed &&
plane->type != DRM_PLANE_TYPE_OVERLAY)
continue;
if (!old_plane_crtc)

@ -4802,7 +4875,8 @@ static int dm_update_planes_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
continue;
if (pflip_needed)
if (pflip_needed &&
plane->type != DRM_PLANE_TYPE_OVERLAY)
continue;
WARN_ON(dm_new_plane_state->dc_state);

@ -5009,17 +5083,24 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
struct edid *edid)
{
int i;
uint64_t val_capable;
bool edid_check_required;
struct detailed_timing *timing;
struct detailed_non_pixel *data;
struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
return;
}
dm_con_state = to_dm_connector_state(connector->state);
edid_check_required = false;
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");

@ -5038,7 +5119,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
amdgpu_dm_connector);
}
}
val_capable = 0;
dm_con_state->freesync_capable = false;
if (edid_check_required == true && (edid->version > 1 ||
(edid->version == 1 && edid->revision > 1))) {
for (i = 0; i < 4; i++) {

@ -5074,7 +5155,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
amdgpu_dm_connector->min_vfreq * 1000000;
amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
amdgpu_dm_connector->max_vfreq * 1000000;
val_capable = 1;
dm_con_state->freesync_capable = true;
}
}
@ -28,7 +28,6 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include "dc.h"
/*
* This file contains the definition for amdgpu_display_manager

@ -53,6 +52,7 @@
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
struct dc;
struct amdgpu_dm_prev_state {
struct drm_framebuffer *fb;

@ -220,6 +220,7 @@ struct dm_connector_state {
uint8_t underscan_hborder;
bool underscan_enable;
struct mod_freesync_user_enable user_enable;
bool freesync_capable;
};
#define to_dm_connector_state(x)\

@ -246,7 +247,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
struct dc_link *link,
int link_index);
int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
@ -25,6 +25,7 @@
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
#define MAX_DRM_LUT_VALUE 0xFFFF
@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
I2C_MOT_TRUE : I2C_MOT_FALSE;
enum ddc_result res;
ssize_t read_bytes;
uint32_t read_bytes = msg->size;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
read_bytes = dal_ddc_service_read_dpcd_data(
res = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
false,
I2C_MOT_UNDEF,
msg->address,
msg->buffer,
msg->size);
return read_bytes;
msg->size,
&read_bytes);
break;
case DP_AUX_NATIVE_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,

@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
msg->size);
break;
case DP_AUX_I2C_READ:
read_bytes = dal_ddc_service_read_dpcd_data(
res = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
true,
mot,
msg->address,
msg->buffer,
msg->size);
return read_bytes;
msg->size,
&read_bytes);
break;
case DP_AUX_I2C_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,

@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
return msg->size;
if (res != DDC_RESULT_SUCESSFULL)
return -EIO;
return read_bytes;
}
static enum drm_connector_status
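With this change dm_dp_aux_transfer() follows the drm_dp_aux .transfer contract: a negative errno on failure, otherwise the number of bytes actually transferred, which may be shorter than requested. DRM core helpers funnel into that hook and propagate its return value; a hedged sketch of how a consumer sees it (the function and its policy are invented for illustration, assuming <drm/drm_dp_helper.h>):

	static int probe_dpcd_rev(struct drm_dp_aux *aux)
	{
		u8 rev;
		ssize_t ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, &rev, 1);

		if (ret < 0)
			return ret;	/* transfer failed, e.g. -EIO */
		if (ret != 1)
			return -EPROTO;	/* short read is now reported honestly */
		return rev;
	}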
@ -37,8 +37,17 @@
unsigned long long dm_get_timestamp(struct dc_context *ctx)
{
/* TODO: return actual timestamp */
return 0;
struct timespec64 time;
getrawmonotonic64(&time);
return timespec64_to_ns(&time);
}
unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
unsigned long long last_time_stamp)
{
return current_time_stamp - last_time_stamp;
}
void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
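dm_get_timestamp() now returns real nanoseconds from the raw monotonic clock instead of a stub 0, which makes dm_get_elapse_time_in_ns() meaningful. A minimal sketch of the intended usage (measure_something() and the variable names are invented for illustration):

	static void measure_something(struct dc_context *ctx)
	{
		unsigned long long start = dm_get_timestamp(ctx);
		unsigned long long end, elapsed_ns;

		/* ... the work being timed goes here ... */

		end = dm_get_timestamp(ctx);
		/* raw-monotonic delta in nanoseconds */
		elapsed_ns = dm_get_elapse_time_in_ns(ctx, end, start);
	}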
@ -26,13 +26,13 @@
#include "dm_services.h"
#include "include/fixed31_32.h"
static inline uint64_t abs_i64(
int64_t arg)
static inline unsigned long long abs_i64(
long long arg)
{
if (arg > 0)
return (uint64_t)arg;
return (unsigned long long)arg;
else
return (uint64_t)(-arg);
return (unsigned long long)(-arg);
}
/*

@ -40,12 +40,12 @@ static inline uint64_t abs_i64(
* result = dividend / divisor
* *remainder = dividend % divisor
*/
static inline uint64_t complete_integer_division_u64(
uint64_t dividend,
uint64_t divisor,
uint64_t *remainder)
static inline unsigned long long complete_integer_division_u64(
unsigned long long dividend,
unsigned long long divisor,
unsigned long long *remainder)
{
uint64_t result;
unsigned long long result;
ASSERT(divisor);

@ -65,29 +65,29 @@ static inline uint64_t complete_integer_division_u64(
(FRACTIONAL_PART_MASK & (x))
struct fixed31_32 dal_fixed31_32_from_fraction(
int64_t numerator,
int64_t denominator)
long long numerator,
long long denominator)
{
struct fixed31_32 res;
bool arg1_negative = numerator < 0;
bool arg2_negative = denominator < 0;
uint64_t arg1_value = arg1_negative ? -numerator : numerator;
uint64_t arg2_value = arg2_negative ? -denominator : denominator;
unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
uint64_t remainder;
unsigned long long remainder;
/* determine integer part */
uint64_t res_value = complete_integer_division_u64(
unsigned long long res_value = complete_integer_division_u64(
arg1_value, arg2_value, &remainder);
ASSERT(res_value <= LONG_MAX);
/* determine fractional part */
{
uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
do {
remainder <<= 1;

@ -103,14 +103,14 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
/* round up LSB */
{
uint64_t summand = (remainder << 1) >= arg2_value;
unsigned long long summand = (remainder << 1) >= arg2_value;
ASSERT(res_value <= LLONG_MAX - summand);
res_value += summand;
}
res.value = (int64_t)res_value;
res.value = (long long)res_value;
if (arg1_negative ^ arg2_negative)
res.value = -res.value;

@ -119,7 +119,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
}
struct fixed31_32 dal_fixed31_32_from_int_nonconst(
int64_t arg)
long long arg)
{
struct fixed31_32 res;

@ -132,7 +132,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst(
struct fixed31_32 dal_fixed31_32_shl(
struct fixed31_32 arg,
uint8_t shift)
unsigned char shift)
{
struct fixed31_32 res;

@ -181,16 +181,16 @@ struct fixed31_32 dal_fixed31_32_mul(
bool arg1_negative = arg1.value < 0;
bool arg2_negative = arg2.value < 0;
uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
uint64_t tmp;
unsigned long long tmp;
res.value = arg1_int * arg2_int;

@ -200,22 +200,22 @@ struct fixed31_32 dal_fixed31_32_mul(
tmp = arg1_int * arg2_fra;
ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg2_int * arg1_fra;
ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg1_fra * arg2_fra;
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (uint64_t)dal_fixed31_32_half.value);
(tmp >= (unsigned long long)dal_fixed31_32_half.value);
ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;

@ -230,13 +230,13 @@ struct fixed31_32 dal_fixed31_32_sqr(
{
struct fixed31_32 res;
uint64_t arg_value = abs_i64(arg.value);
unsigned long long arg_value = abs_i64(arg.value);
uint64_t arg_int = GET_INTEGER_PART(arg_value);
unsigned long long arg_int = GET_INTEGER_PART(arg_value);
uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
uint64_t tmp;
unsigned long long tmp;
res.value = arg_int * arg_int;

@ -246,20 +246,20 @@ struct fixed31_32 dal_fixed31_32_sqr(
tmp = arg_int * arg_fra;
ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg_fra * arg_fra;
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (uint64_t)dal_fixed31_32_half.value);
(tmp >= (unsigned long long)dal_fixed31_32_half.value);
ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;

@ -288,7 +288,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
struct fixed31_32 res = dal_fixed31_32_one;
int32_t n = 27;
int n = 27;
struct fixed31_32 arg_norm = arg;

@ -299,7 +299,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
arg_norm,
dal_fixed31_32_mul_int(
dal_fixed31_32_two_pi,
(int32_t)div64_s64(
(int)div64_s64(
arg_norm.value,
dal_fixed31_32_two_pi.value)));
}

@ -343,7 +343,7 @@ struct fixed31_32 dal_fixed31_32_cos(
struct fixed31_32 res = dal_fixed31_32_one;
int32_t n = 26;
int n = 26;
do {
res = dal_fixed31_32_sub(

@ -370,7 +370,7 @@ struct fixed31_32 dal_fixed31_32_cos(
static struct fixed31_32 fixed31_32_exp_from_taylor_series(
struct fixed31_32 arg)
{
uint32_t n = 9;
unsigned int n = 9;
struct fixed31_32 res = dal_fixed31_32_from_fraction(
n + 2,

@ -409,7 +409,7 @@ struct fixed31_32 dal_fixed31_32_exp(
if (dal_fixed31_32_le(
dal_fixed31_32_ln2_div_2,
dal_fixed31_32_abs(arg))) {
int32_t m = dal_fixed31_32_round(
int m = dal_fixed31_32_round(
dal_fixed31_32_div(
arg,
dal_fixed31_32_ln2));

@ -429,7 +429,7 @@ struct fixed31_32 dal_fixed31_32_exp(
if (m > 0)
return dal_fixed31_32_shl(
fixed31_32_exp_from_taylor_series(r),
(uint8_t)m);
(unsigned char)m);
else
return dal_fixed31_32_div_int(
fixed31_32_exp_from_taylor_series(r),

@ -482,50 +482,50 @@ struct fixed31_32 dal_fixed31_32_pow(
arg2));
}
int32_t dal_fixed31_32_floor(
int dal_fixed31_32_floor(
struct fixed31_32 arg)
{
uint64_t arg_value = abs_i64(arg.value);
unsigned long long arg_value = abs_i64(arg.value);
if (arg.value >= 0)
return (int32_t)GET_INTEGER_PART(arg_value);
return (int)GET_INTEGER_PART(arg_value);
else
return -(int32_t)GET_INTEGER_PART(arg_value);
return -(int)GET_INTEGER_PART(arg_value);
}
int32_t dal_fixed31_32_round(
int dal_fixed31_32_round(
struct fixed31_32 arg)
{
uint64_t arg_value = abs_i64(arg.value);
unsigned long long arg_value = abs_i64(arg.value);
const int64_t summand = dal_fixed31_32_half.value;
const long long summand = dal_fixed31_32_half.value;
ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
if (arg.value >= 0)
return (int32_t)GET_INTEGER_PART(arg_value);
return (int)GET_INTEGER_PART(arg_value);
else
return -(int32_t)GET_INTEGER_PART(arg_value);
return -(int)GET_INTEGER_PART(arg_value);
}
int32_t dal_fixed31_32_ceil(
int dal_fixed31_32_ceil(
struct fixed31_32 arg)
{
uint64_t arg_value = abs_i64(arg.value);
unsigned long long arg_value = abs_i64(arg.value);
const int64_t summand = dal_fixed31_32_one.value -
const long long summand = dal_fixed31_32_one.value -
dal_fixed31_32_epsilon.value;
ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
if (arg.value >= 0)
return (int32_t)GET_INTEGER_PART(arg_value);
return (int)GET_INTEGER_PART(arg_value);
else
return -(int32_t)GET_INTEGER_PART(arg_value);
return -(int)GET_INTEGER_PART(arg_value);
}

/* this function is a generic helper to translate fixed point value to

@ -535,15 +535,15 @@ int32_t dal_fixed31_32_ceil(
* part in 32 bits. It is used in hw programming (scaler)
*/
static inline uint32_t ux_dy(
int64_t value,
uint32_t integer_bits,
uint32_t fractional_bits)
static inline unsigned int ux_dy(
long long value,
unsigned int integer_bits,
unsigned int fractional_bits)
{
/* 1. create mask of integer part */
uint32_t result = (1 << integer_bits) - 1;
unsigned int result = (1 << integer_bits) - 1;
/* 2. mask out fractional part */
uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
/* 3. shrink fixed point integer part to be of integer_bits width*/
result &= GET_INTEGER_PART(value);
/* 4. make space for fractional part to be filled in after integer */

@ -554,13 +554,13 @@ static inline uint32_t ux_dy(
return result | fractional_part;
}
static inline uint32_t clamp_ux_dy(
int64_t value,
uint32_t integer_bits,
uint32_t fractional_bits,
uint32_t min_clamp)
static inline unsigned int clamp_ux_dy(
long long value,
unsigned int integer_bits,
unsigned int fractional_bits,
unsigned int min_clamp)
{
uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits);
unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
return (1 << (integer_bits + fractional_bits)) - 1;

@ -570,35 +570,35 @@ static inline uint32_t clamp_ux_dy(
return min_clamp;
}
uint32_t dal_fixed31_32_u2d19(
unsigned int dal_fixed31_32_u2d19(
struct fixed31_32 arg)
{
return ux_dy(arg.value, 2, 19);
}
uint32_t dal_fixed31_32_u0d19(
unsigned int dal_fixed31_32_u0d19(
struct fixed31_32 arg)
{
return ux_dy(arg.value, 0, 19);
}
uint32_t dal_fixed31_32_clamp_u0d14(
unsigned int dal_fixed31_32_clamp_u0d14(
struct fixed31_32 arg)
{
return clamp_ux_dy(arg.value, 0, 14, 1);
}
uint32_t dal_fixed31_32_clamp_u0d10(
unsigned int dal_fixed31_32_clamp_u0d10(
struct fixed31_32 arg)
{
return clamp_ux_dy(arg.value, 0, 10, 1);
}
int32_t dal_fixed31_32_s4d19(
int dal_fixed31_32_s4d19(
struct fixed31_32 arg)
{
if (arg.value < 0)
return -(int32_t)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
else
return ux_dy(arg.value, 4, 19);
}
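The conversions above only rename fixed-width types; the underlying 31.32 format is unchanged: a signed 64-bit value holding round(x * 2^32), i.e. a sign bit, 31 integer bits and 32 fractional bits. A self-contained userspace sketch of dal_fixed31_32_from_fraction()'s rounding for small positive operands (naive on purpose: the shift overflows for |num| >= 2^31, which the kernel version avoids by doing the division bit by bit):

	#include <stdio.h>

	static long long from_fraction(long long num, long long den)
	{
		unsigned long long q = ((unsigned long long)num << 32) / (unsigned long long)den;
		unsigned long long r = ((unsigned long long)num << 32) % (unsigned long long)den;

		/* round up the LSB when the remainder is at least half the divisor */
		return (long long)(q + (2 * r >= (unsigned long long)den));
	}

	int main(void)
	{
		long long third = from_fraction(1, 3);

		/* prints 0x0000000055555555 (~0.3333333333) */
		printf("1/3 -> 0x%016llx (%.10f)\n",
		       (unsigned long long)third, third / 4294967296.0);
		return 0;
	}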
@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1(
struct bios_parser *bp,
struct dc_firmware_info *info);
static enum bp_result get_firmware_info_v3_2(
struct bios_parser *bp,
struct dc_firmware_info *info);
static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
struct atom_display_object_path_v2 *object);

@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info(
case 3:
switch (revision.minor) {
case 1:
case 2:
result = get_firmware_info_v3_1(bp, info);
break;
case 2:
result = get_firmware_info_v3_2(bp, info);
break;
default:
break;
}

@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1(
return BP_RESULT_OK;
}
static enum bp_result get_firmware_info_v3_2(
struct bios_parser *bp,
struct dc_firmware_info *info)
{
struct atom_firmware_info_v3_2 *firmware_info;
struct atom_display_controller_info_v4_1 *dce_info = NULL;
struct atom_common_table_header *header;
struct atom_data_revision revision;
struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;
if (!info)
return BP_RESULT_BADINPUT;
firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
DATA_TABLES(firmwareinfo));
dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
DATA_TABLES(dce_info));
if (!firmware_info || !dce_info)
return BP_RESULT_BADBIOSTABLE;
memset(info, 0, sizeof(*info));
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(smu_info));
get_atom_data_table_revision(header, &revision);
if (revision.minor == 2) {
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
} else if (revision.minor == 3) {
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
}
// We need to convert from 10KHz units into KHz units.
info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
/* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */
info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
/* Hardcode frequency if BIOS gives no DCE Ref Clk */
if (info->pll_info.crystal_frequency == 0) {
if (revision.minor == 2)
info->pll_info.crystal_frequency = 27000;
else if (revision.minor == 3)
info->pll_info.crystal_frequency = 100000;
}
/*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
/* Get GPU PLL VCO Clock */
if (bp->cmd_tbl.get_smu_clock_info != NULL) {
if (revision.minor == 2)
info->smu_gpu_pll_output_freq =
bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
else if (revision.minor == 3)
info->smu_gpu_pll_output_freq =
bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
}
return BP_RESULT_OK;
}
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
struct graphics_object_id object_id,
@ -24,7 +24,7 @@
*/
#include "dm_services.h"
#include "amdgpu.h"
#include "atom.h"
#include "include/bios_parser_interface.h"

@ -35,16 +35,16 @@
#include "bios_parser_types_internal.h"
#define EXEC_BIOS_CMD_TABLE(command, params)\
(cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GetIndexIntoMasterTable(COMMAND, command), \
&params) == 0)
(uint32_t *)&params) == 0)
#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
#define BIOS_CMD_TABLE_PARA_REVISION(command)\
bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
GetIndexIntoMasterTable(COMMAND, command))
static void init_dig_encoder_control(struct bios_parser *bp);

@ -82,16 +82,18 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_dce_clock(bp);
}
static uint32_t bios_cmd_table_para_revision(void *cgs_device,
static uint32_t bios_cmd_table_para_revision(void *dev,
uint32_t index)
{
struct amdgpu_device *adev = dev;
uint8_t frev, crev;
if (cgs_atom_get_cmd_table_revs(cgs_device,
if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
index,
&frev, &crev) != 0)
return 0;
&frev, &crev))
return crev;
else
return 0;
}
/*******************************************************************************

@ -368,7 +370,7 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t crev;
if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
frev, crev) != 0)
frev, crev) == false)
BREAK_TO_DEBUGGER();
switch (crev) {
case 2:
@ -26,14 +26,18 @@
#include "dm_services.h"
#include "ObjectID.h"
#include "atomfirmware.h"
#include "atomfirmware.h"
#include "atom.h"
#include "include/bios_parser_interface.h"
#include "command_table2.h"
#include "command_table_helper2.h"
#include "bios_parser_helper.h"
#include "bios_parser_types_internal2.h"
#include "amdgpu.h"
#define DC_LOGGER \
bp->base.ctx->logger

@ -43,16 +47,16 @@
->FieldName)-(char *)0)/sizeof(uint16_t))
#define EXEC_BIOS_CMD_TABLE(fname, params)\
(cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname), \
&params) == 0)
(uint32_t *)&params) == 0)
#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname))
static void init_dig_encoder_control(struct bios_parser *bp);

@ -86,16 +90,18 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
init_get_smu_clock_info(bp);
}
static uint32_t bios_cmd_table_para_revision(void *cgs_device,
static uint32_t bios_cmd_table_para_revision(void *dev,
uint32_t index)
{
struct amdgpu_device *adev = dev;
uint8_t frev, crev;
if (cgs_atom_get_cmd_table_revs(cgs_device,
if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
index,
&frev, &crev) != 0)
return 0;
&frev, &crev))
return crev;
else
return 0;
}
/******************************************************************************

@ -201,7 +207,7 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev;
if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0)
if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
BREAK_TO_DEBUGGER();
switch (crev) {
case 6:
@ -51,6 +51,9 @@ bool dal_bios_parser_init_cmd_tbl_helper(
return true;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
*h = dal_cmd_tbl_helper_dce112_get_table();
return true;

@ -52,6 +52,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
579 drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h (new file)
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef _CALCS_CALCS_LOGGER_H_
#define _CALCS_CALCS_LOGGER_H_
#define DC_LOGGER \
	logger

static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
{

	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
	DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip");
	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_calcs_version version: %d", dceip->version);
	DC_LOG_BANDWIDTH_CALCS(" [bool] large_cursor: %d", dceip->large_cursor);
	DC_LOG_BANDWIDTH_CALCS(" [bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker);
	DC_LOG_BANDWIDTH_CALCS(" [bool] display_write_back_supported: %d", dceip->display_write_back_supported);
	DC_LOG_BANDWIDTH_CALCS(" [bool] argb_compression_support: %d", dceip->argb_compression_support);
	DC_LOG_BANDWIDTH_CALCS(" [bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled);
	DC_LOG_BANDWIDTH_CALCS(" [bool] underlay_downscale_prefetch_enabled: %d",
			dceip->underlay_downscale_prefetch_enabled);
	DC_LOG_BANDWIDTH_CALCS(" [bool] graphics_lb_nodownscaling_multi_line_prefetching: %d",
			dceip->graphics_lb_nodownscaling_multi_line_prefetching);
	DC_LOG_BANDWIDTH_CALCS(" [bool] limit_excessive_outstanding_dmif_requests: %d",
			dceip->limit_excessive_outstanding_dmif_requests);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_max_outstanding_group_num: %d",
			dceip->cursor_max_outstanding_group_num);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_width: %d", dceip->chunk_width);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d",
			dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d",
			dceip->display_write_back420_luma_mcifwr_buffer_size);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d",
			dceip->display_write_back420_chroma_mcifwr_buffer_size);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d",
			dceip->scatter_gather_pte_request_rows_in_tiling_mode);
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d",
			bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d",
			bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d",
			bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d",
			bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d",
			bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d",
			bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] alpha_vscaler_efficiency: %d",
			bw_fixed_to_int(dceip->alpha_vscaler_efficiency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_write_pixels_per_dispclk: %d",
			bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component444: %d",
			bw_fixed_to_int(dceip->lb_size_per_component444));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d",
			bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_luma_lb_size_per_component: %d",
			bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_chroma_lb_size_per_component: %d",
			bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay422_lb_size_per_component: %d",
			bw_fixed_to_int(dceip->underlay422_lb_size_per_component));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_dcp_buffer_lines: %d",
			bw_fixed_to_int(dceip->cursor_dcp_buffer_lines));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_width_efficient_for_tiling: %d",
			bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_height_efficient_for_tiling: %d",
			bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d",
			bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d",
			bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_outstanding_pte_request_limit: %d",
			bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d",
			bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] linear_mode_line_request_alternation_slice: %d",
			bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_ramping_factor: %d",
			bw_fixed_to_int(dceip->dispclk_ramping_factor));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_throughput_factor: %d",
			bw_fixed_to_int(dceip->display_pipe_throughput_factor));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_all_surfaces_burst_time: %d",
			bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_request_buffer_size: %d",
			bw_fixed_to_int(dceip->dmif_request_buffer_size));

}

static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
{

	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
	DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios");
	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks);
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_dispclk: %d",
			bw_fixed_to_int(vbios->low_voltage_max_dispclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_dispclk: %d",
			bw_fixed_to_int(vbios->mid_voltage_max_dispclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_dispclk: %d",
			bw_fixed_to_int(vbios->high_voltage_max_dispclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_phyclk: %d",
			bw_fixed_to_int(vbios->low_voltage_max_phyclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_phyclk: %d",
			bw_fixed_to_int(vbios->mid_voltage_max_phyclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_phyclk: %d",
			bw_fixed_to_int(vbios->high_voltage_max_phyclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_exit_latency: %d",
			bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_entry_latency: %d",
			bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_latency: %d",
			bw_fixed_to_int(vbios->nbp_state_change_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrmc_urgent_latency: %d",
			bw_fixed_to_int(vbios->mcifwrmc_urgent_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable: %d", vbios->scatter_gather_enable);
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] down_spread_percentage: %d",
			bw_fixed_to_int(vbios->down_spread_percentage));
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_width: %d", vbios->cursor_width);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] average_compression_rate: %d", vbios->average_compression_rate);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d",
			vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel);
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_blackout_recovery_time: %d",
			bw_fixed_to_int(vbios->maximum_blackout_recovery_time));

}

static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
{

	int i, j, k;

	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
	DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data");
	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_displays: %d", data->number_of_displays);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines panning_and_bezel_adjustment: %d",
			data->panning_and_bezel_adjustment);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode);
	DC_LOG_BANDWIDTH_CALCS(" [bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode);
	DC_LOG_BANDWIDTH_CALCS(" [bool] cpup_state_change_enable: %d", data->cpup_state_change_enable);
	DC_LOG_BANDWIDTH_CALCS(" [bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable);
	DC_LOG_BANDWIDTH_CALCS(" [bool] nbp_state_change_enable: %d", data->nbp_state_change_enable);
	DC_LOG_BANDWIDTH_CALCS(" [bool] stutter_mode_enable: %d", data->stutter_mode_enable);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] y_clk_level: %d", data->y_clk_level);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] sclk_level: %d", data->sclk_level);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_request_delay: %d", data->chunk_request_delay);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode);
	DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode);
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_efficiency: %d", bw_fixed_to_int(data->dram_efficiency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_surface_type: %d",
			bw_fixed_to_int(data->src_width_after_surface_type));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_surface_type: %d",
			bw_fixed_to_int(data->src_height_after_surface_type));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_surface_type: %d",
			bw_fixed_to_int(data->hsr_after_surface_type));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_rotation: %d",
			bw_fixed_to_int(data->src_width_after_rotation));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_rotation: %d",
			bw_fixed_to_int(data->src_height_after_rotation));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_source_efficient_for_tiling: %d",
			bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] num_lines_at_frame_start: %d",
			bw_fixed_to_int(data->num_lines_at_frame_start));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_mcifwr_size_in_time: %d",
			bw_fixed_to_int(data->min_mcifwr_size_in_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_dmif_size: %d",
			bw_fixed_to_int(data->total_requests_for_dmif_size));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d",
			bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_pte_per_pte_request: %d",
			bw_fixed_to_int(data->useful_pte_per_pte_request));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_rows: %d",
			bw_fixed_to_int(data->scatter_gather_pte_request_rows));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_row_height: %d",
			bw_fixed_to_int(data->scatter_gather_row_height));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_vblank: %d",
			bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] inefficient_linear_pitch_in_bytes: %d",
			bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_request_groups: %d",
			bw_fixed_to_int(data->cursor_total_request_groups));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_requests: %d",
			bw_fixed_to_int(data->scatter_gather_total_pte_requests));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_request_groups: %d",
			bw_fixed_to_int(data->scatter_gather_total_pte_request_groups));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_number_of_data_request_page_close_open: %d",
			bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d",
			bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_page_close_open: %d",
			bw_fixed_to_int(data->bytes_per_page_close_open));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_page_close_open_time: %d",
			bw_fixed_to_int(data->mcifwr_total_page_close_open_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_adjusted_dmif_size: %d",
			bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_trips: %d",
			bw_fixed_to_int(data->total_dmifmc_urgent_trips));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_latency: %d",
			bw_fixed_to_int(data->total_dmifmc_urgent_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_data: %d",
			bw_fixed_to_int(data->total_display_reads_required_data));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_dram_access_data: %d",
			bw_fixed_to_int(data->total_display_reads_required_dram_access_data));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_data: %d",
			bw_fixed_to_int(data->total_display_writes_required_data));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_dram_access_data: %d",
			bw_fixed_to_int(data->total_display_writes_required_dram_access_data));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_data: %d",
			bw_fixed_to_int(data->display_reads_required_data));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_dram_access_data: %d",
			bw_fixed_to_int(data->display_reads_required_dram_access_data));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_page_close_open_time: %d",
			bw_fixed_to_int(data->dmif_total_page_close_open_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d",
			bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_read_buffer_size_in_time: %d",
			bw_fixed_to_int(data->min_read_buffer_size_in_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer: %d",
			bw_fixed_to_int(data->display_reads_time_for_data_transfer));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_writes_time_for_data_transfer: %d",
			bw_fixed_to_int(data->display_writes_time_for_data_transfer));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_dram_bandwidth: %d",
			bw_fixed_to_int(data->dmif_required_dram_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_dram_bandwidth: %d",
			bw_fixed_to_int(data->mcifwr_required_dram_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d",
			bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_mcifmcwr_urgent_latency: %d",
			bw_fixed_to_int(data->required_mcifmcwr_urgent_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dram_bandwidth_gbyte_per_second: %d",
			bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_pixel_throughput: %d",
			bw_fixed_to_int(data->display_pipe_pixel_throughput));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping: %d",
			bw_fixed_to_int(data->total_dispclk_required_with_ramping));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping: %d",
			bw_fixed_to_int(data->total_dispclk_required_without_ramping));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_read_request_bandwidth: %d",
			bw_fixed_to_int(data->total_read_request_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_write_request_bandwidth: %d",
			bw_fixed_to_int(data->total_write_request_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d",
			bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d",
			bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d",
			bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_pixels_per_data_fifo_entry: %d",
			bw_fixed_to_int(data->min_pixels_per_data_fifo_entry));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] line_source_pixels_transfer_time: %d",
			bw_fixed_to_int(data->line_source_pixels_transfer_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifdram_access_efficiency: %d",
			bw_fixed_to_int(data->dmifdram_access_efficiency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrdram_access_efficiency: %d",
			bw_fixed_to_int(data->mcifwrdram_access_efficiency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth_no_compression: %d",
			bw_fixed_to_int(data->total_average_bandwidth_no_compression));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth: %d",
			bw_fixed_to_int(data->total_average_bandwidth));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_stutter_cycle_duration: %d",
			bw_fixed_to_int(data->total_stutter_cycle_duration));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_efficiency: %d", bw_fixed_to_int(data->stutter_efficiency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] worst_number_of_trips_to_memory: %d",
			bw_fixed_to_int(data->worst_number_of_trips_to_memory));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_dmif_clients: %d",
			bw_fixed_to_int(data->latency_for_non_dmif_clients));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_mcifwr_clients: %d",
			bw_fixed_to_int(data->latency_for_non_mcifwr_clients));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d",
			bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_margin: %d",
			bw_fixed_to_int(data->nbp_state_dram_speed_change_margin));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d",
			bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_margin: %d",
			bw_fixed_to_int(data->dram_speed_change_margin));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_vblank_dram_speed_change_margin: %d",
			bw_fixed_to_int(data->min_vblank_dram_speed_change_margin));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_stutter_refresh_duration: %d",
			bw_fixed_to_int(data->min_stutter_refresh_duration));
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_bytes_requested: %d", data->total_bytes_requested);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size);
	DC_LOG_BANDWIDTH_CALCS(" [uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts);
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d",
			bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported));
	DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_latency_supported: %d",
			bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported));

	for (i = 0; i < maximum_number_of_surfaces; i++) {
		DC_LOG_BANDWIDTH_CALCS(" [bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] lpt_en[%d]:%d", i, data->lpt_en[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] use_alpha[%d]:%d", i, data->use_alpha[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] enable[%d]:%d", i, data->enable[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable_for_pipe[%d]:%d",
				i, data->scatter_gather_enable_for_pipe[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] interlace_mode[%d]:%d",
				i, data->interlace_mode[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] display_pstate_change_enable[%d]:%d",
				i, data->display_pstate_change_enable[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]);
		DC_LOG_BANDWIDTH_CALCS(" [uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]);
		DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_chunks_non_fbc_mode[%d]:%d",
				i, data->max_chunks_non_fbc_mode[i]);
		DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]);
		DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]);
		DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]);
		DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr2[%d]:%d",
				i, data->output_bppdp4_lane_hbr2[i]);
		DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr3[%d]:%d",
				i, data->output_bppdp4_lane_hbr3[i]);
		DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]);
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_buffer_transfer_time[%d]:%d",
				i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] displays_with_same_mode[%d]:%d",
				i, bw_fixed_to_int(data->displays_with_same_mode[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_dmif_buffer_size[%d]:%d",
				i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_refresh_duration[%d]:%d",
				i, bw_fixed_to_int(data->stutter_refresh_duration[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_exit_watermark[%d]:%d",
				i, bw_fixed_to_int(data->stutter_exit_watermark[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_entry_watermark[%d]:%d",
				i, bw_fixed_to_int(data->stutter_entry_watermark[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels[%d]:%d",
				i, bw_fixed_to_int(data->pitch_in_pixels[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d",
				i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] rotation_angle[%d]:%d",
				i, bw_fixed_to_int(data->rotation_angle[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] compression_rate[%d]:%d",
				i, bw_fixed_to_int(data->compression_rate[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_rounded_up_to_chunks[%d]:%d",
				i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_pixels[%d]:%d",
				i, bw_fixed_to_int(data->source_width_pixels[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_rounded_up_to_chunks[%d]:%d",
				i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_bandwidth[%d]:%d",
				i, bw_fixed_to_int(data->display_bandwidth[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_bandwidth[%d]:%d",
				i, bw_fixed_to_int(data->request_bandwidth[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_request[%d]:%d",
				i, bw_fixed_to_int(data->bytes_per_request[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_bytes_per_request[%d]:%d",
				i, bw_fixed_to_int(data->useful_bytes_per_request[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lines_interleaved_in_mem_access[%d]:%d",
				i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_hiding_lines[%d]:%d",
				i, bw_fixed_to_int(data->latency_hiding_lines[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions[%d]:%d",
				i, bw_fixed_to_int(data->lb_partitions[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions_max[%d]:%d",
				i, bw_fixed_to_int(data->lb_partitions_max[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_with_ramping[%d]:%d",
				i, bw_fixed_to_int(data->dispclk_required_with_ramping[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_without_ramping[%d]:%d",
				i, bw_fixed_to_int(data->dispclk_required_without_ramping[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_buffer_size[%d]:%d",
				i, bw_fixed_to_int(data->data_buffer_size[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] outstanding_chunk_request_limit[%d]:%d",
				i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] urgent_watermark[%d]:%d",
				i, bw_fixed_to_int(data->urgent_watermark[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_watermark[%d]:%d",
				i, bw_fixed_to_int(data->nbp_state_change_watermark[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_cycle_duration[%d]:%d",
				i, bw_fixed_to_int(data->stutter_cycle_duration[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth[%d]:%d",
				i, bw_fixed_to_int(data->average_bandwidth[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth_no_compression[%d]:%d",
				i, bw_fixed_to_int(data->average_bandwidth_no_compression[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_limit[%d]:%d",
				i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component[%d]:%d",
				i, bw_fixed_to_int(data->lb_size_per_component[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] memory_chunk_size_in_bytes[%d]:%d",
				i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pipe_chunk_size_in_bytes[%d]:%d",
				i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d",
				i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size[%d]:%d",
				i, bw_fixed_to_int(data->adjusted_data_buffer_size[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d",
				i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixels_per_data_fifo_entry[%d]:%d",
				i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d",
				i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pte_request_per_chunk[%d]:%d",
				i, bw_fixed_to_int(data->pte_request_per_chunk[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_width[%d]:%d",
				i, bw_fixed_to_int(data->scatter_gather_page_width[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_height[%d]:%d",
				i, bw_fixed_to_int(data->scatter_gather_page_height[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d",
				i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d",
				i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_width_pixels[%d]:%d",
				i, bw_fixed_to_int(data->cursor_width_pixels[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding[%d]:%d",
				i, bw_fixed_to_int(data->minimum_latency_hiding[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding[%d]:%d",
				i, bw_fixed_to_int(data->maximum_latency_hiding[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d",
				i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d",
				i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_first_output_pixel[%d]:%d",
				i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_last_output_pixel[%d]:%d",
				i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_first_output_pixel[%d]:%d",
				i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_last_output_pixel[%d]:%d",
				i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d",
				i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_latency_hiding[%d]:%d",
				i, bw_fixed_to_int(data->cursor_latency_hiding[i]));
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_dram_speed_change_margin[%d]:%d",
				i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i]));
	}

	for (i = 0; i < maximum_number_of_surfaces; i++) {
		for (j = 0; j < 3; j++) {
			for (k = 0; k < 8; k++) {

				DC_LOG_BANDWIDTH_CALCS("\n [bw_fixed] line_source_transfer_time[%d][%d][%d]:%d",
						i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k]));
				DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d",
						i, j, k,
						bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k]));
			}
		}
	}

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 8; j++) {

			DC_LOG_BANDWIDTH_CALCS("\n [uint32_t] num_displays_with_margin[%d][%d]:%d",
					i, j, data->num_displays_with_margin[i][j]);
			DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_burst_time[%d][%d]:%d",
					i, j, bw_fixed_to_int(data->dmif_burst_time[i][j]));
			DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_burst_time[%d][%d]:%d",
					i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j]));
			DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dram_speed_change_margin[%d][%d]:%d",
					i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j]));
			DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d",
					i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j]));
			DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration_margin[%d][%d]:%d",
					i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j]));
			DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d",
					i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j]));
			DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d",
					i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j]));
		}
	}

	for (i = 0; i < 6; i++) {
		DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d",
				i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i]));
	}
}

#endif /* _CALCS_CALCS_LOGGER_H_ */
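Every bw_fixed field above is funneled through bw_fixed_to_int() before logging, since the kernel printf path cannot format the fixed-point type directly and floating point is off limits outside kernel_fpu_begin()/kernel_fpu_end() sections. A hedged sketch of the truncation this implies, assuming the 24 fractional bits conventionally used by the dce_calcs fixed-point code; all names below are illustrative, not the driver's:

/* Illustrative only: bw_fixed in dce_calcs is a 64-bit fixed-point value;
 * assuming 24 fractional bits, logging via bw_fixed_to_int() amounts to an
 * arithmetic shift that drops the fraction. */
#define EXAMPLE_BW_FIXED_FRAC_BITS 24

struct example_bw_fixed { long long value; };

static inline long long example_bw_fixed_to_int(struct example_bw_fixed v)
{
	return v.value >> EXAMPLE_BW_FIXED_FRAC_BITS; /* keep integer part */
}

So a field holding 3.7 in fixed point is logged as 3; the dump trades fractional precision for a printf that is safe in any context.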
@ -28,6 +28,7 @@
#include "dc.h"
#include "core_types.h"
#include "dal_asic_id.h"
#include "calcs_logger.h"

/*
 * NOTE:
@ -52,11 +53,16 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
		return BW_CALCS_VERSION_CARRIZO;

	case FAMILY_VI:
		if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
			return BW_CALCS_VERSION_POLARIS12;
		if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
			return BW_CALCS_VERSION_POLARIS10;
		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
				ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
			return BW_CALCS_VERSION_POLARIS11;
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
		if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
			return BW_CALCS_VERSION_VEGAM;
#endif
		return BW_CALCS_VERSION_INVALID;

	case FAMILY_AI:
@ -2145,6 +2151,11 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
		break;
	case BW_CALCS_VERSION_POLARIS10:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
		/* TODO: Treat VEGAM the same as P10 for now
		 * Need to tune the para for VEGAM if needed */
	case BW_CALCS_VERSION_VEGAM:
#endif
		vbios.memory_type = bw_def_gddr5;
		vbios.dram_channel_width_in_bits = 32;
		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
@ -2373,6 +2384,122 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
		break;
	case BW_CALCS_VERSION_POLARIS12:
		vbios.memory_type = bw_def_gddr5;
		vbios.dram_channel_width_in_bits = 32;
		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
		vbios.number_of_dram_banks = 8;
		vbios.high_yclk = bw_int_to_fixed(6000);
		vbios.mid_yclk = bw_int_to_fixed(3200);
		vbios.low_yclk = bw_int_to_fixed(1000);
		vbios.low_sclk = bw_int_to_fixed(678);
		vbios.mid1_sclk = bw_int_to_fixed(864);
		vbios.mid2_sclk = bw_int_to_fixed(900);
		vbios.mid3_sclk = bw_int_to_fixed(920);
		vbios.mid4_sclk = bw_int_to_fixed(940);
		vbios.mid5_sclk = bw_int_to_fixed(960);
		vbios.mid6_sclk = bw_int_to_fixed(980);
		vbios.high_sclk = bw_int_to_fixed(1049);
		vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
		vbios.data_return_bus_width = bw_int_to_fixed(32);
		vbios.trc = bw_int_to_fixed(48);
		if (vbios.number_of_dram_channels == 2) // 64-bit
			vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
		else
			vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
		vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
		vbios.nbp_state_change_latency = bw_int_to_fixed(250);
		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
		vbios.scatter_gather_enable = false;
		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
		vbios.cursor_width = 32;
		vbios.average_compression_rate = 4;
		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);

		dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
		dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
		dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
		dceip.large_cursor = false;
		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
		dceip.cursor_max_outstanding_group_num = 1;
		dceip.lines_interleaved_into_lb = 2;
		dceip.chunk_width = 256;
		dceip.number_of_graphics_pipes = 5;
		dceip.number_of_underlay_pipes = 0;
		dceip.low_power_tiling_mode = 0;
		dceip.display_write_back_supported = true;
		dceip.argb_compression_support = true;
		dceip.underlay_vscaler_efficiency6_bit_per_component =
			bw_frc_to_fixed(35556, 10000);
		dceip.underlay_vscaler_efficiency8_bit_per_component =
			bw_frc_to_fixed(34286, 10000);
		dceip.underlay_vscaler_efficiency10_bit_per_component =
			bw_frc_to_fixed(32, 10);
		dceip.underlay_vscaler_efficiency12_bit_per_component =
			bw_int_to_fixed(3);
		dceip.graphics_vscaler_efficiency6_bit_per_component =
			bw_frc_to_fixed(35, 10);
		dceip.graphics_vscaler_efficiency8_bit_per_component =
			bw_frc_to_fixed(34286, 10000);
		dceip.graphics_vscaler_efficiency10_bit_per_component =
			bw_frc_to_fixed(32, 10);
		dceip.graphics_vscaler_efficiency12_bit_per_component =
			bw_int_to_fixed(3);
		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
		dceip.max_dmif_buffer_allocated = 4;
		dceip.graphics_dmif_size = 12288;
		dceip.underlay_luma_dmif_size = 19456;
		dceip.underlay_chroma_dmif_size = 23552;
		dceip.pre_downscaler_enabled = true;
		dceip.underlay_downscale_prefetch_enabled = true;
		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
			bw_int_to_fixed(1);
		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
			82176);
		dceip.underlay420_chroma_lb_size_per_component =
			bw_int_to_fixed(164352);
		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
			82176);
		dceip.cursor_chunk_width = bw_int_to_fixed(64);
		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
		dceip.underlay_maximum_width_efficient_for_tiling =
			bw_int_to_fixed(1920);
		dceip.underlay_maximum_height_efficient_for_tiling =
			bw_int_to_fixed(1080);
		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
			bw_frc_to_fixed(3, 10);
		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
			bw_int_to_fixed(25);
		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
			2);
		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
			bw_int_to_fixed(128);
		dceip.limit_excessive_outstanding_dmif_requests = true;
		dceip.linear_mode_line_request_alternation_slice =
			bw_int_to_fixed(64);
		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
			32;
		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
		dceip.dispclk_per_request = bw_int_to_fixed(2);
		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
		break;
	case BW_CALCS_VERSION_STONEY:
		vbios.memory_type = bw_def_gddr5;
		vbios.dram_channel_width_in_bits = 64;
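Worked example for the Polaris12 table above: number_of_dram_channels is derived from the board's VRAM width, and the resulting channel count selects dmifmc_urgent_latency. A sketch with illustrative widths, not taken from any real SKU list:

#include <stdint.h>

/* Illustrative values only -- mirrors the Polaris12 derivation above. */
static uint32_t example_polaris12_urgent_latency(uint32_t vram_width)
{
	uint32_t dram_channel_width_in_bits = 32;
	uint32_t channels = vram_width / dram_channel_width_in_bits;

	/* a 64-bit board yields 2 channels and the stricter latency of 4 */
	return (channels == 2) ? 4 : 3;
}

So example_polaris12_urgent_latency(64) returns 4, while a wider 128-bit configuration (4 channels) returns 3.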
@ -2815,6 +2942,19 @@ static void populate_initial_data(
			data->bytes_per_pixel[num_displays + 4] = 4;
			break;
		}
	} else if (pipe[i].stream->dst.width != 0 &&
			pipe[i].stream->dst.height != 0 &&
			pipe[i].stream->src.width != 0 &&
			pipe[i].stream->src.height != 0) {
		data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width);
		data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
		data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height);
		data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
		data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
		data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width);
		data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height);
		data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
		data->bytes_per_pixel[num_displays + 4] = 4;
	} else {
		data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
		data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
@ -2873,6 +3013,11 @@ bool bw_calcs(struct dc_context *ctx,
	struct bw_fixed mid_yclk = vbios->mid_yclk;
	struct bw_fixed low_yclk = vbios->low_yclk;

	if (ctx->dc->debug.bandwidth_calcs_trace) {
		print_bw_calcs_dceip(ctx->logger, dceip);
		print_bw_calcs_vbios(ctx->logger, vbios);
		print_bw_calcs_data(ctx->logger, data);
	}
	calculate_bandwidth(dceip, vbios, data);

	yclk_lvl = data->y_clk_level;
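With the hunk above, the three dumpers from calcs_logger.h run on every bw_calcs() pass once dc->debug.bandwidth_calcs_trace is set. A hedged usage sketch; the wrapper function is illustrative and not part of the patch:

/* Illustrative helper, not in this patch: arm the bandwidth-calcs trace
 * so the next bw_calcs() call logs the dceip, vbios and data structs. */
static void example_enable_bw_trace(struct dc *dc)
{
	dc->debug.bandwidth_calcs_trace = true;
}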
@ -2968,7 +3113,33 @@ bool bw_calcs(struct dc_context *ctx,
			bw_fixed_to_int(bw_mul(data->
				stutter_exit_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->stutter_entry_wm_ns[0].a_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[1].a_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[2].a_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
		if (ctx->dc->caps.max_slave_planes) {
			calcs_output->stutter_entry_wm_ns[3].a_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].a_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
		} else {
			calcs_output->stutter_entry_wm_ns[3].a_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].a_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
		}
		calcs_output->stutter_entry_wm_ns[5].a_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->urgent_wm_ns[0].a_mark =
			bw_fixed_to_int(bw_mul(data->
@ -3063,7 +3234,33 @@ bool bw_calcs(struct dc_context *ctx,
			bw_fixed_to_int(bw_mul(data->
				stutter_exit_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->stutter_entry_wm_ns[0].b_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[1].b_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[2].b_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
		if (ctx->dc->caps.max_slave_planes) {
			calcs_output->stutter_entry_wm_ns[3].b_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].b_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
		} else {
			calcs_output->stutter_entry_wm_ns[3].b_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].b_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
		}
		calcs_output->stutter_entry_wm_ns[5].b_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->urgent_wm_ns[0].b_mark =
			bw_fixed_to_int(bw_mul(data->
@ -3156,6 +3353,34 @@ bool bw_calcs(struct dc_context *ctx,
			bw_fixed_to_int(bw_mul(data->
				stutter_exit_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->stutter_entry_wm_ns[0].c_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[1].c_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[2].c_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
		if (ctx->dc->caps.max_slave_planes) {
			calcs_output->stutter_entry_wm_ns[3].c_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].c_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
		} else {
			calcs_output->stutter_entry_wm_ns[3].c_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].c_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
		}
		calcs_output->stutter_entry_wm_ns[5].c_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->urgent_wm_ns[0].c_mark =
			bw_fixed_to_int(bw_mul(data->
				urgent_watermark[4], bw_int_to_fixed(1000)));
@ -3260,6 +3485,33 @@ bool bw_calcs(struct dc_context *ctx,
			bw_fixed_to_int(bw_mul(data->
				stutter_exit_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->stutter_entry_wm_ns[0].d_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[1].d_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
		calcs_output->stutter_entry_wm_ns[2].d_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
		if (ctx->dc->caps.max_slave_planes) {
			calcs_output->stutter_entry_wm_ns[3].d_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].d_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
		} else {
			calcs_output->stutter_entry_wm_ns[3].d_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
			calcs_output->stutter_entry_wm_ns[4].d_mark =
				bw_fixed_to_int(bw_mul(data->
					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
		}
		calcs_output->stutter_entry_wm_ns[5].d_mark =
			bw_fixed_to_int(bw_mul(data->
				stutter_entry_watermark[9], bw_int_to_fixed(1000)));

		calcs_output->urgent_wm_ns[0].d_mark =
			bw_fixed_to_int(bw_mul(data->

@ -1459,39 +1459,39 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
kernel_fpu_begin();
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n"
"sr_enter_plus_exit_time: %d ns\n"
"urgent_latency: %d ns\n"
"write_back_latency: %d ns\n"
"percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
"sr_enter_plus_exit_time: %f ns\n"
"urgent_latency: %f ns\n"
"write_back_latency: %f ns\n"
"percent_of_ideal_drambw_received_after_urg_latency: %f %%\n"
"max_request_size: %d bytes\n"
"dcfclkv_max0p9: %d kHz\n"
"dcfclkv_nom0p8: %d kHz\n"
"dcfclkv_mid0p72: %d kHz\n"
"dcfclkv_min0p65: %d kHz\n"
"max_dispclk_vmax0p9: %d kHz\n"
"max_dispclk_vnom0p8: %d kHz\n"
"max_dispclk_vmid0p72: %d kHz\n"
"max_dispclk_vmin0p65: %d kHz\n"
"max_dppclk_vmax0p9: %d kHz\n"
"max_dppclk_vnom0p8: %d kHz\n"
"max_dppclk_vmid0p72: %d kHz\n"
"max_dppclk_vmin0p65: %d kHz\n"
"socclk: %d kHz\n"
"fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
"fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
"fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
"fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
"phyclkv_max0p9: %d kHz\n"
"phyclkv_nom0p8: %d kHz\n"
"phyclkv_mid0p72: %d kHz\n"
"phyclkv_min0p65: %d kHz\n"
"downspreading: %d %\n"
"dcfclkv_max0p9: %f kHz\n"
"dcfclkv_nom0p8: %f kHz\n"
"dcfclkv_mid0p72: %f kHz\n"
"dcfclkv_min0p65: %f kHz\n"
"max_dispclk_vmax0p9: %f kHz\n"
"max_dispclk_vnom0p8: %f kHz\n"
"max_dispclk_vmid0p72: %f kHz\n"
"max_dispclk_vmin0p65: %f kHz\n"
"max_dppclk_vmax0p9: %f kHz\n"
"max_dppclk_vnom0p8: %f kHz\n"
"max_dppclk_vmid0p72: %f kHz\n"
"max_dppclk_vmin0p65: %f kHz\n"
"socclk: %f kHz\n"
"fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n"
"fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n"
"fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n"
"fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n"
"phyclkv_max0p9: %f kHz\n"
"phyclkv_nom0p8: %f kHz\n"
"phyclkv_mid0p72: %f kHz\n"
"phyclkv_min0p65: %f kHz\n"
"downspreading: %f %%\n"
"round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
"urgent_out_of_order_return_per_channel: %d Bytes\n"
"number_of_channels: %d\n"
"vmm_page_size: %d Bytes\n"
"dram_clock_change_latency: %d ns\n"
"dram_clock_change_latency: %f ns\n"
"return_bus_width: %d Bytes\n",
dc->dcn_soc->sr_exit_time * 1000,
dc->dcn_soc->sr_enter_plus_exit_time * 1000,
@ -1527,11 +1527,11 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dcn_soc->vmm_page_size,
dc->dcn_soc->dram_clock_change_latency * 1000,
dc->dcn_soc->return_bus_width);
DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n"
"det_buffer_size_in_kbyte: %d\n"
"dpp_output_buffer_pixels: %d\n"
"opp_output_buffer_lines: %d\n"
"pixel_chunk_size_in_kbyte: %d\n"
DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n"
"det_buffer_size_in_kbyte: %f\n"
"dpp_output_buffer_pixels: %f\n"
"opp_output_buffer_lines: %f\n"
"pixel_chunk_size_in_kbyte: %f\n"
"pte_enable: %d\n"
"pte_chunk_size: %d kbytes\n"
"meta_chunk_size: %d kbytes\n"
@ -1550,13 +1550,13 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
"max_pscl_tolb_throughput: %d pixels/dppclk\n"
"max_lb_tovscl_throughput: %d pixels/dppclk\n"
"max_vscl_tohscl_throughput: %d pixels/dppclk\n"
"max_hscl_ratio: %d\n"
"max_vscl_ratio: %d\n"
"max_hscl_ratio: %f\n"
"max_vscl_ratio: %f\n"
"max_hscl_taps: %d\n"
"max_vscl_taps: %d\n"
"pte_buffer_size_in_requests: %d\n"
"dispclk_ramping_margin: %d %\n"
"under_scan_factor: %d %\n"
"dispclk_ramping_margin: %f %%\n"
"under_scan_factor: %f %%\n"
"max_inter_dcn_tile_repeaters: %d\n"
"can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
"bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
@ -936,95 +936,6 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}

/*
* TODO this whole function needs to go
*
* dc_surface_update is needlessly complex. See if we can just replace this
* with a dc_plane_state and follow the atomic model a bit more closely here.
*/
bool dc_commit_planes_to_stream(
struct dc *dc,
struct dc_plane_state **plane_states,
uint8_t new_plane_count,
struct dc_stream_state *dc_stream,
struct dc_state *state)
{
/* no need to dynamically allocate this. it's pretty small */
struct dc_surface_update updates[MAX_SURFACES];
struct dc_flip_addrs *flip_addr;
struct dc_plane_info *plane_info;
struct dc_scaling_info *scaling_info;
int i;
struct dc_stream_update *stream_update =
kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);

if (!stream_update) {
BREAK_TO_DEBUGGER();
return false;
}

flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
GFP_KERNEL);
plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
GFP_KERNEL);
scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
GFP_KERNEL);

if (!flip_addr || !plane_info || !scaling_info) {
kfree(flip_addr);
kfree(plane_info);
kfree(scaling_info);
kfree(stream_update);
return false;
}

memset(updates, 0, sizeof(updates));

stream_update->src = dc_stream->src;
stream_update->dst = dc_stream->dst;
stream_update->out_transfer_func = dc_stream->out_transfer_func;

for (i = 0; i < new_plane_count; i++) {
updates[i].surface = plane_states[i];
updates[i].gamma =
(struct dc_gamma *)plane_states[i]->gamma_correction;
updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
flip_addr[i].address = plane_states[i]->address;
flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
plane_info[i].color_space = plane_states[i]->color_space;
plane_info[i].input_tf = plane_states[i]->input_tf;
plane_info[i].format = plane_states[i]->format;
plane_info[i].plane_size = plane_states[i]->plane_size;
plane_info[i].rotation = plane_states[i]->rotation;
plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
plane_info[i].stereo_format = plane_states[i]->stereo_format;
plane_info[i].tiling_info = plane_states[i]->tiling_info;
plane_info[i].visible = plane_states[i]->visible;
plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
plane_info[i].dcc = plane_states[i]->dcc;
scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
scaling_info[i].src_rect = plane_states[i]->src_rect;
scaling_info[i].dst_rect = plane_states[i]->dst_rect;
scaling_info[i].clip_rect = plane_states[i]->clip_rect;

updates[i].flip_addr = &flip_addr[i];
updates[i].plane_info = &plane_info[i];
updates[i].scaling_info = &scaling_info[i];
}

dc_commit_updates_for_stream(
dc,
updates,
new_plane_count,
dc_stream, stream_update, plane_states, state);

kfree(flip_addr);
kfree(plane_info);
kfree(scaling_info);
kfree(stream_update);
return true;
}

struct dc_state *dc_create_state(void)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
@ -1107,9 +1018,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
if (u->plane_info->color_space != u->surface->color_space)
update_flags->bits.color_space_change = 1;

if (u->plane_info->input_tf != u->surface->input_tf)
update_flags->bits.input_tf_change = 1;

if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
update_flags->bits.horizontal_mirror_change = 1;

@ -1243,12 +1151,20 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->input_csc_color_matrix)
update_flags->bits.input_csc_change = 1;

if (update_flags->bits.in_transfer_func_change
|| update_flags->bits.input_csc_change) {
if (u->coeff_reduction_factor)
update_flags->bits.coeff_reduction_change = 1;

if (update_flags->bits.in_transfer_func_change) {
type = UPDATE_TYPE_MED;
elevate_update_type(&overall_type, type);
}

if (update_flags->bits.input_csc_change
|| update_flags->bits.coeff_reduction_change) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}

return overall_type;
}

@ -1297,7 +1213,7 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL)
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.bits.full_update = 1;
updates[i].surface->update_flags.raw = 0xFFFFFFFF;

return type;
}
@ -1375,6 +1291,12 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
}

if (stream_update && stream_update->periodic_fn_vsync_delta &&
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
pipe_ctx->stream->periodic_fn_vsync_delta);
}
}

@ -36,8 +36,9 @@
#include "hw_sequencer.h"

#include "resource.h"
#define DC_LOGGER \
logger

#define DC_LOGGER_INIT(logger)

#define SURFACE_TRACE(...) do {\
if (dc->debug.surface_trace) \
@ -60,8 +61,7 @@ void pre_surface_trace(
int surface_count)
{
int i;
struct dc *core_dc = dc;
struct dal_logger *logger = core_dc->ctx->logger;
DC_LOGGER_INIT(dc->ctx->logger);

for (i = 0; i < surface_count; i++) {
const struct dc_plane_state *plane_state = plane_states[i];
@ -72,8 +72,8 @@ void pre_surface_trace(
"plane_state->visible = %d;\n"
"plane_state->flip_immediate = %d;\n"
"plane_state->address.type = %d;\n"
"plane_state->address.grph.addr.quad_part = 0x%X;\n"
"plane_state->address.grph.meta_addr.quad_part = 0x%X;\n"
"plane_state->address.grph.addr.quad_part = 0x%llX;\n"
"plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n"
"plane_state->scaling_quality.h_taps = %d;\n"
"plane_state->scaling_quality.v_taps = %d;\n"
"plane_state->scaling_quality.h_taps_c = %d;\n"
@ -155,7 +155,6 @@ void pre_surface_trace(
"plane_state->tiling_info.gfx8.pipe_config = %d;\n"
"plane_state->tiling_info.gfx8.array_mode = %d;\n"
"plane_state->color_space = %d;\n"
"plane_state->input_tf = %d;\n"
"plane_state->dcc.enable = %d;\n"
"plane_state->format = %d;\n"
"plane_state->rotation = %d;\n"
@ -163,7 +162,6 @@ void pre_surface_trace(
plane_state->tiling_info.gfx8.pipe_config,
plane_state->tiling_info.gfx8.array_mode,
plane_state->color_space,
plane_state->input_tf,
plane_state->dcc.enable,
plane_state->format,
plane_state->rotation,
@ -183,8 +181,7 @@ void update_surface_trace(
int surface_count)
{
int i;
struct dc *core_dc = dc;
struct dal_logger *logger = core_dc->ctx->logger;
DC_LOGGER_INIT(dc->ctx->logger);

for (i = 0; i < surface_count; i++) {
const struct dc_surface_update *update = &updates[i];
@ -192,8 +189,8 @@ void update_surface_trace(
SURFACE_TRACE("Update %d\n", i);
if (update->flip_addr) {
SURFACE_TRACE("flip_addr->address.type = %d;\n"
"flip_addr->address.grph.addr.quad_part = 0x%X;\n"
"flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
"flip_addr->address.grph.addr.quad_part = 0x%llX;\n"
"flip_addr->address.grph.meta_addr.quad_part = 0x%llX;\n"
"flip_addr->flip_immediate = %d;\n",
update->flip_addr->address.type,
update->flip_addr->address.grph.addr.quad_part,
@ -204,16 +201,15 @@ void update_surface_trace(
if (update->plane_info) {
SURFACE_TRACE(
"plane_info->color_space = %d;\n"
"plane_info->input_tf = %d;\n"
"plane_info->format = %d;\n"
"plane_info->plane_size.grph.surface_pitch = %d;\n"
"plane_info->plane_size.grph.surface_size.height = %d;\n"
"plane_info->plane_size.grph.surface_size.width = %d;\n"
"plane_info->plane_size.grph.surface_size.x = %d;\n"
"plane_info->plane_size.grph.surface_size.y = %d;\n"
"plane_info->rotation = %d;\n",
"plane_info->rotation = %d;\n"
"plane_info->stereo_format = %d;\n",
update->plane_info->color_space,
update->plane_info->input_tf,
update->plane_info->format,
update->plane_info->plane_size.grph.surface_pitch,
update->plane_info->plane_size.grph.surface_size.height,
@ -303,8 +299,7 @@ void update_surface_trace(

void post_surface_trace(struct dc *dc)
{
struct dc *core_dc = dc;
struct dal_logger *logger = core_dc->ctx->logger;
DC_LOGGER_INIT(dc->ctx->logger);

SURFACE_TRACE("post surface process.\n");

@ -316,10 +311,10 @@ void context_timing_trace(
{
int i;
struct dc *core_dc = dc;
struct dal_logger *logger = core_dc->ctx->logger;
int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
struct crtc_position position;
unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
DC_LOGGER_INIT(dc->ctx->logger);

for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
@ -354,9 +349,7 @@ void context_clock_trace(
struct dc_state *context)
{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
struct dc *core_dc = dc;
struct dal_logger *logger = core_dc->ctx->logger;

DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
@ -371,6 +364,7 @@ void context_clock_trace(
context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
context->bw.dcn.calc_clk.fclk_khz);
context->bw.dcn.calc_clk.fclk_khz,
context->bw.dcn.calc_clk.socclk_khz);
#endif
}

@ -208,6 +208,7 @@ void color_space_to_black_color(
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
case COLOR_SPACE_2020_YCBCR:
*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
break;

@ -216,7 +217,25 @@ void color_space_to_black_color(
black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
break;

default:
/**
* Remove default and add a case for every color space
* so the compiler will warn when a new color space
* is forgotten
*/
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_SRGB:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_XV_YCC_709:
case COLOR_SPACE_XV_YCC_601:
case COLOR_SPACE_2020_RGB_FULLRANGE:
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_ADOBERGB:
case COLOR_SPACE_DCIP3:
case COLOR_SPACE_DISPLAYNATIVE:
case COLOR_SPACE_DOLBYVISION:
case COLOR_SPACE_APPCTRL:
case COLOR_SPACE_CUSTOMPOINTS:
/* default is sRGB black (full range). */
*black_color =
black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
@ -230,6 +249,9 @@ bool hwss_wait_for_blank_complete(
{
int counter;

/* Not applicable if the pipe is not primary, save 300ms of boot time */
if (!tg->funcs->is_blanked)
return true;
for (counter = 0; counter < 100; counter++) {
if (tg->funcs->is_blanked(tg))
break;

@ -45,8 +45,9 @@
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_enum.h"
#include "dce/dce_11_0_sh_mask.h"
#define DC_LOGGER \
dc_ctx->logger

#define DC_LOGGER_INIT(logger)

#define LINK_INFO(...) \
DC_LOG_HW_HOTPLUG( \
@ -561,7 +562,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
struct dc_context *dc_ctx = link->ctx;
struct dc_sink *sink = NULL;
enum dc_connection_type new_connection_type = dc_connection_none;

DC_LOGGER_INIT(link->ctx->logger);
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
return false;

@ -927,6 +928,7 @@ static bool construct(
struct integrated_info info = {{{ 0 }}};
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
DC_LOGGER_INIT(dc_ctx->logger);

link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
@ -1135,7 +1137,8 @@ static void dpcd_configure_panel_mode(
{
union dpcd_edp_config edp_config_set;
bool panel_mode_edp = false;
struct dc_context *dc_ctx = link->ctx;
DC_LOGGER_INIT(link->ctx->logger);

memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));

if (DP_PANEL_MODE_DEFAULT != panel_mode) {
@ -1183,16 +1186,21 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
union down_spread_ctrl downspread;
union down_spread_ctrl old_downspread;
union down_spread_ctrl new_downspread;

core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
&old_downspread.raw, sizeof(old_downspread));

downspread.bits.IGNORE_MSA_TIMING_PARAM =
new_downspread.raw = old_downspread.raw;

new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
(stream->ignore_msa_timing_param) ? 1 : 0;

if (new_downspread.raw != old_downspread.raw) {
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
&new_downspread.raw, sizeof(new_downspread));
}
}

static enum dc_status enable_link_dp(
@ -1843,9 +1851,22 @@ static void disable_link(struct dc_link *link, enum signal_type signal)

static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dc_dongle_caps *dongle_caps)
const struct dpcd_caps *dpcd_caps)
{
unsigned int required_pix_clk = timing->pix_clk_khz;
const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;

switch (dpcd_caps->dongle_type) {
case DISPLAY_DONGLE_DP_VGA_CONVERTER:
case DISPLAY_DONGLE_DP_DVI_CONVERTER:
case DISPLAY_DONGLE_DP_DVI_DONGLE:
if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
return true;
else
return false;
default:
break;
}

if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
@ -1911,7 +1932,7 @@ enum dc_status dc_link_validate_mode_timing(
const struct dc_crtc_timing *timing)
{
uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
struct dc_dongle_caps *dongle_caps = &link->dpcd_caps.dongle_caps;
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;

/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
@ -1924,7 +1945,7 @@ enum dc_status dc_link_validate_mode_timing(
return DC_EXCEED_DONGLE_CAP;

/* Active Dongle*/
if (!dp_active_dongle_validate_timing(timing, dongle_caps))
if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
return DC_EXCEED_DONGLE_CAP;

switch (stream->signal) {
@ -1950,10 +1971,10 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
struct dc_context *dc_ctx = link->ctx;
unsigned int controller_id = 0;
bool use_smooth_brightness = true;
int i;
DC_LOGGER_INIT(link->ctx->logger);

if ((dmcu == NULL) ||
(abm == NULL) ||
@ -1961,7 +1982,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
return false;

if (stream) {
if (stream->bl_pwm_level == 0)
if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
frame_ramp = 0;

((struct dc_stream_state *)stream)->bl_pwm_level = level;
@ -2149,8 +2170,8 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
struct dc_context *dc_ctx = link->ctx;
uint8_t i;
DC_LOGGER_INIT(link->ctx->logger);

/* enable_link_dp_mst already check link->enabled_stream_count
* and stream is in link->stream[]. This is called during set mode,
@ -2178,11 +2199,11 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);

for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
DC_LOG_MST("stream_enc[%d]: 0x%x "
DC_LOG_MST("stream_enc[%d]: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
i,
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
@ -2229,7 +2250,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
uint8_t i;
bool mst_mode = (link->type == dc_connection_mst_branch);
struct dc_context *dc_ctx = link->ctx;
DC_LOGGER_INIT(link->ctx->logger);

/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
@ -2268,11 +2289,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);

for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
DC_LOG_MST("stream_enc[%d]: 0x%x "
DC_LOG_MST("stream_enc[%d]: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
i,
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
@ -2302,8 +2323,8 @@ void core_link_enable_stream(
struct pipe_ctx *pipe_ctx)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_context *dc_ctx = pipe_ctx->stream->ctx;
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&

@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}

ssize_t dal_ddc_service_read_dpcd_data(
enum ddc_result dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
uint32_t address,
uint8_t *data,
uint32_t len)
uint32_t len,
uint32_t *read)
{
struct aux_payload read_payload = {
.i2c_over_aux = i2c,
@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
.mot = mot
};

*read = 0;

if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
BREAK_TO_DEBUGGER();
return DDC_RESULT_FAILED_INVALID_OPERATION;
@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
ddc->ctx->i2caux,
ddc->ddc_pin,
&command)) {
return (ssize_t)command.payloads->length;
*read = command.payloads->length;
return DDC_RESULT_SUCESSFULL;
}

return DDC_RESULT_FAILED_OPERATION;

@ -1378,8 +1378,8 @@ static uint32_t bandwidth_in_kbps_from_timing(
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
switch (timing->display_color_depth) {

switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
bits_per_channel = 6;
break;
@ -1401,14 +1401,20 @@ static uint32_t bandwidth_in_kbps_from_timing(
default:
break;
}

ASSERT(bits_per_channel != 0);

kbps = timing->pix_clk_khz;
kbps *= bits_per_channel;

if (timing->flags.Y_ONLY != 1)
if (timing->flags.Y_ONLY != 1) {
/* Only Y-Only reduces bandwidth by 1/3 compared to RGB */
kbps *= 3;
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
kbps /= 2;
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
kbps = kbps * 2 / 3;
}

return kbps;

@ -2278,6 +2284,8 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
int i;

memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@ -2285,11 +2293,15 @@ static bool retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));

for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
DP_DPCD_REV,
dpcd_data,
sizeof(dpcd_data));
if (status == DC_OK)
break;
}

if (status != DC_OK) {
dm_error("%s: Read dpcd data failed.\n", __func__);
@ -2376,6 +2388,10 @@ bool detect_dp_sink_caps(struct dc_link *link)
void detect_edp_sink_caps(struct dc_link *link)
{
retrieve_link_cap(link);

if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
link->reported_link_cap.link_rate = LINK_RATE_HIGH2;

link->verified_link_cap = link->reported_link_cap;
}

@ -45,8 +45,9 @@
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"
#define DC_LOGGER \
ctx->logger

#define DC_LOGGER_INIT(logger)

enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@ -78,6 +79,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
dc_version = DCE_VERSION_11_2;
}
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
dc_version = DCE_VERSION_11_22;
#endif
break;
case FAMILY_AI:
dc_version = DCE_VERSION_12_0;
@ -124,6 +129,9 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc, asic_id);
break;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
res_pool = dce112_create_resource_pool(
num_virtual_links, dc);
break;
@ -835,7 +843,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct view recout_skip = { 0 };
bool res = false;
struct dc_context *ctx = pipe_ctx->stream->ctx;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
* Inits require viewport, taps, ratios and recout of split pipe
@ -843,6 +851,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);

if (pipe_ctx->stream->timing.flags.INTERLACE)
pipe_ctx->stream->dst.height *= 2;

calculate_scaling_ratios(pipe_ctx);

calculate_viewport(pipe_ctx);
@ -863,6 +874,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)

pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
if (pipe_ctx->stream->timing.flags.INTERLACE)
pipe_ctx->plane_res.scl_data.v_active *= 2;

/* Taps calculations */
@ -908,6 +921,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->dst_rect.x,
plane_state->dst_rect.y);

if (pipe_ctx->stream->timing.flags.INTERLACE)
pipe_ctx->stream->dst.height /= 2;

return res;
}

@ -1294,6 +1310,19 @@ bool dc_add_all_planes_for_stream(
}

static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
if (cur_stream == NULL)
return true;

if (memcmp(&cur_stream->hdr_static_metadata,
&new_stream->hdr_static_metadata,
sizeof(struct dc_info_packet)) != 0)
return true;

return false;
}

static bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
@ -1329,6 +1358,9 @@ static bool are_stream_backends_same(
if (is_timing_changed(stream_a, stream_b))
return false;

if (is_hdr_static_meta_changed(stream_a, stream_b))
return false;

return true;
}

@ -1599,18 +1631,6 @@ enum dc_status dc_remove_stream_from_ctx(
return DC_OK;
}

static void copy_pipe_ctx(
const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
{
struct dc_plane_state *plane_state = to_pipe_ctx->plane_state;
struct dc_stream_state *stream = to_pipe_ctx->stream;

*to_pipe_ctx = *from_pipe_ctx;
to_pipe_ctx->stream = stream;
if (plane_state != NULL)
to_pipe_ctx->plane_state = plane_state;
}

static struct dc_stream_state *find_pll_sharable_stream(
struct dc_stream_state *stream_needs_pll,
struct dc_state *context)
@ -1703,7 +1723,7 @@ enum dc_status resource_map_pool_resources(
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif

if (pipe_idx < 0)
if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL)
return DC_NO_CONTROLLER_RESOURCE;

pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
@ -1752,26 +1772,6 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}

/* first stream in the context is used to populate the rest */
void validate_guaranteed_copy_streams(
struct dc_state *context,
int max_streams)
{
int i;

for (i = 1; i < max_streams; i++) {
context->streams[i] = context->streams[0];

copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
&context->res_ctx.pipe_ctx[i]);
context->res_ctx.pipe_ctx[i].stream =
context->res_ctx.pipe_ctx[0].stream;

dc_stream_retain(context->streams[i]);
context->stream_count++;
}
}

void dc_resource_state_copy_construct_current(
const struct dc *dc,
struct dc_state *dst_ctx)
@ -1843,7 +1843,7 @@ enum dc_status dc_validate_global_state(
}

static void patch_gamut_packet_checksum(
struct encoder_info_packet *gamut_packet)
struct dc_info_packet *gamut_packet)
{
/* For gamut we recalc checksum */
if (gamut_packet->valid) {
@ -1862,12 +1862,11 @@ static void patch_gamut_packet_checksum(
}

static void set_avi_info_frame(
struct encoder_info_packet *info_packet,
struct dc_info_packet *info_packet,
struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
struct info_frame info_frame = { {0} };
uint32_t pixel_encoding = 0;
enum scanning_type scan_type = SCANNING_TYPE_NODATA;
enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
@ -1877,22 +1876,24 @@ static void set_avi_info_frame(
unsigned int cn0_cn1_value = 0;
uint8_t *check_sum = NULL;
uint8_t byte_index = 0;
union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi;
union hdmi_info_packet hdmi_info;
union display_content_support support = {0};
unsigned int vic = pipe_ctx->stream->timing.vic;
enum dc_timing_3d_format format;

memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));

color_space = pipe_ctx->stream->output_color_space;
if (color_space == COLOR_SPACE_UNKNOWN)
color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;

/* Initialize header */
hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
hdmi_info.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
/* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
* not be used in HDMI 2.0 (Section 10.1) */
hdmi_info->bits.header.version = 2;
hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
hdmi_info.bits.header.version = 2;
hdmi_info.bits.header.length = HDMI_AVI_INFOFRAME_SIZE;

/*
* IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
@ -1918,39 +1919,39 @@ static void set_avi_info_frame(

/* Y0_Y1_Y2 : The pixel encoding */
/* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding;
hdmi_info.bits.Y0_Y1_Y2 = pixel_encoding;

/* A0 = 1 Active Format Information valid */
hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID;
hdmi_info.bits.A0 = ACTIVE_FORMAT_VALID;

/* B0, B1 = 3; Bar info data is valid */
hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID;
hdmi_info.bits.B0_B1 = BAR_INFO_BOTH_VALID;

hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
hdmi_info.bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;

/* S0, S1 : Underscan / Overscan */
/* TODO: un-hardcode scan type */
scan_type = SCANNING_TYPE_UNDERSCAN;
hdmi_info->bits.S0_S1 = scan_type;
hdmi_info.bits.S0_S1 = scan_type;

/* C0, C1 : Colorimetry */
if (color_space == COLOR_SPACE_YCBCR709 ||
color_space == COLOR_SPACE_YCBCR709_LIMITED)
hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
else if (color_space == COLOR_SPACE_YCBCR601 ||
color_space == COLOR_SPACE_YCBCR601_LIMITED)
hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601;
else {
hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA;
hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA;
}
if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
color_space == COLOR_SPACE_2020_YCBCR) {
hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
} else if (color_space == COLOR_SPACE_ADOBERGB) {
hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
}

/* TODO: un-hardcode aspect ratio */
@ -1959,18 +1960,18 @@ static void set_avi_info_frame(
switch (aspect) {
case ASPECT_RATIO_4_3:
case ASPECT_RATIO_16_9:
hdmi_info->bits.M0_M1 = aspect;
hdmi_info.bits.M0_M1 = aspect;
break;

case ASPECT_RATIO_NO_DATA:
case ASPECT_RATIO_64_27:
case ASPECT_RATIO_256_135:
default:
hdmi_info->bits.M0_M1 = 0;
hdmi_info.bits.M0_M1 = 0;
}

/* Active Format Aspect ratio - same as Picture Aspect Ratio. */
hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
hdmi_info.bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;

/* TODO: un-hardcode cn0_cn1 and itc */

@ -2013,8 +2014,8 @@ static void set_avi_info_frame(
}
}
}
hdmi_info->bits.CN0_CN1 = cn0_cn1_value;
hdmi_info->bits.ITC = itc_value;
hdmi_info.bits.CN0_CN1 = cn0_cn1_value;
hdmi_info.bits.ITC = itc_value;
}

/* TODO : We should handle YCC quantization */
@ -2023,19 +2024,19 @@ static void set_avi_info_frame(
stream->sink->edid_caps.qy_bit == 1) {
if (color_space == COLOR_SPACE_SRGB ||
color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
} else {
hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
}
} else {
hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
}

///VIC
@ -2060,51 +2061,49 @@ static void set_avi_info_frame(
break;
}
}
hdmi_info->bits.VIC0_VIC7 = vic;
hdmi_info.bits.VIC0_VIC7 = vic;

/* pixel repetition
* PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
* repetition start from 1 */
hdmi_info->bits.PR0_PR3 = 0;
hdmi_info.bits.PR0_PR3 = 0;

/* Bar Info
* barTop: Line Number of End of Top Bar.
* barBottom: Line Number of Start of Bottom Bar.
* barLeft: Pixel Number of End of Left Bar.
* barRight: Pixel Number of Start of Right Bar. */
hdmi_info->bits.bar_top = stream->timing.v_border_top;
hdmi_info->bits.bar_bottom = (stream->timing.v_total
hdmi_info.bits.bar_top = stream->timing.v_border_top;
hdmi_info.bits.bar_bottom = (stream->timing.v_total
- stream->timing.v_border_bottom + 1);
hdmi_info->bits.bar_left = stream->timing.h_border_left;
hdmi_info->bits.bar_right = (stream->timing.h_total
hdmi_info.bits.bar_left = stream->timing.h_border_left;
hdmi_info.bits.bar_right = (stream->timing.h_total
- stream->timing.h_border_right + 1);

/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
check_sum = &hdmi_info.packet_raw_data.sb[0];

*check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;

for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
*check_sum += hdmi_info->packet_raw_data.sb[byte_index];
*check_sum += hdmi_info.packet_raw_data.sb[byte_index];

/* one byte complement */
*check_sum = (uint8_t) (0x100 - *check_sum);

/* Store in hw_path_mode */
info_packet->hb0 = hdmi_info->packet_raw_data.hb0;
info_packet->hb1 = hdmi_info->packet_raw_data.hb1;
info_packet->hb2 = hdmi_info->packet_raw_data.hb2;
info_packet->hb0 = hdmi_info.packet_raw_data.hb0;
info_packet->hb1 = hdmi_info.packet_raw_data.hb1;
info_packet->hb2 = hdmi_info.packet_raw_data.hb2;

for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet.
info_packet_hdmi.packet_raw_data.sb); byte_index++)
info_packet->sb[byte_index] = info_frame.avi_info_packet.
info_packet_hdmi.packet_raw_data.sb[byte_index];
for (byte_index = 0; byte_index < sizeof(hdmi_info.packet_raw_data.sb); byte_index++)
info_packet->sb[byte_index] = hdmi_info.packet_raw_data.sb[byte_index];

info_packet->valid = true;
}

static void set_vendor_info_packet(
struct encoder_info_packet *info_packet,
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
uint32_t length = 0;
@ -2217,7 +2216,7 @@ static void set_vendor_info_packet(
}

static void set_spd_info_packet(
struct encoder_info_packet *info_packet,
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
/* SPD info packet for FreeSync */
@ -2338,104 +2337,19 @@ static void set_spd_info_packet(
}

static void set_hdr_static_info_packet(
struct encoder_info_packet *info_packet,
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
uint16_t i = 0;
enum signal_type signal = stream->signal;
uint32_t data;
/* HDR Static Metadata info packet for HDR10 */

if (!stream->hdr_static_metadata.hdr_supported)
if (!stream->hdr_static_metadata.valid)
return;

if (dc_is_hdmi_signal(signal)) {
info_packet->valid = true;

info_packet->hb0 = 0x87;
info_packet->hb1 = 0x01;
info_packet->hb2 = 0x1A;
i = 1;
} else if (dc_is_dp_signal(signal)) {
info_packet->valid = true;

info_packet->hb0 = 0x00;
info_packet->hb1 = 0x87;
info_packet->hb2 = 0x1D;
info_packet->hb3 = (0x13 << 2);
i = 2;
}

data = stream->hdr_static_metadata.is_hdr;
info_packet->sb[i++] = data ? 0x02 : 0x00;
info_packet->sb[i++] = 0x00;

data = stream->hdr_static_metadata.chromaticity_green_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.chromaticity_green_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.chromaticity_blue_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.chromaticity_blue_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.chromaticity_red_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.chromaticity_red_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.chromaticity_white_point_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.chromaticity_white_point_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.max_luminance;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.min_luminance;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.maximum_content_light_level;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

data = stream->hdr_static_metadata.maximum_frame_average_light_level;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;

if (dc_is_hdmi_signal(signal)) {
uint32_t checksum = 0;

checksum += info_packet->hb0;
checksum += info_packet->hb1;
checksum += info_packet->hb2;

for (i = 1; i <= info_packet->hb2; i++)
checksum += info_packet->sb[i];

info_packet->sb[0] = 0x100 - checksum;
} else if (dc_is_dp_signal(signal)) {
info_packet->sb[0] = 0x01;
info_packet->sb[1] = 0x1A;
}
*info_packet = stream->hdr_static_metadata;
}

static void set_vsc_info_packet(
struct encoder_info_packet *info_packet,
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
unsigned int vscPacketRevision = 0;
@ -2650,6 +2564,8 @@ bool pipe_need_reprogram(
if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;

if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;

return false;
}

@ -101,14 +101,16 @@ static void construct(struct dc_stream_state *stream,
stream->status.link = stream->sink->link;

update_stream_signal(stream);

stream->out_transfer_func = dc_create_transfer_func();
stream->out_transfer_func->type = TF_TYPE_BYPASS;
}

static void destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
if (stream->out_transfer_func != NULL) {
dc_transfer_func_release(
stream->out_transfer_func);
dc_transfer_func_release(stream->out_transfer_func);
stream->out_transfer_func = NULL;
}
}
@ -176,6 +178,7 @@ bool dc_stream_set_cursor_attributes(
int i;
struct dc *core_dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;

if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@ -203,9 +206,17 @@ bool dc_stream_set_cursor_attributes(
if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
continue;

if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}

core_dc->hwss.set_cursor_attribute(pipe_ctx);
}

if (pipe_to_program)
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);

return true;
}

@ -216,6 +227,7 @@ bool dc_stream_set_cursor_position(
int i;
struct dc *core_dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;

if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@ -241,9 +253,17 @@ bool dc_stream_set_cursor_position(
!pipe_ctx->plane_res.ipp)
continue;

if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}

core_dc->hwss.set_cursor_position(pipe_ctx);
}

if (pipe_to_program)
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);

return true;
}

|
@ -38,6 +38,12 @@
|
||||
static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
|
||||
{
|
||||
plane_state->ctx = ctx;
|
||||
|
||||
plane_state->gamma_correction = dc_create_gamma();
|
||||
plane_state->gamma_correction->is_identity = true;
|
||||
|
||||
plane_state->in_transfer_func = dc_create_transfer_func();
|
||||
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
|
||||
}
|
||||
|
||||
static void destruct(struct dc_plane_state *plane_state)
|
||||
@ -66,7 +72,7 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
|
||||
{
|
||||
struct dc *core_dc = dc;
|
||||
|
||||
struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
|
||||
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (NULL == plane_state)
|
||||
@ -120,7 +126,7 @@ static void dc_plane_state_free(struct kref *kref)
|
||||
{
|
||||
struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
|
||||
destruct(plane_state);
|
||||
kfree(plane_state);
|
||||
kvfree(plane_state);
|
||||
}
|
||||
|
||||
void dc_plane_state_release(struct dc_plane_state *plane_state)
|
||||
@ -136,7 +142,7 @@ void dc_gamma_retain(struct dc_gamma *gamma)
|
||||
static void dc_gamma_free(struct kref *kref)
|
||||
{
|
||||
struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
|
||||
kfree(gamma);
|
||||
kvfree(gamma);
|
||||
}
|
||||
|
||||
void dc_gamma_release(struct dc_gamma **gamma)
|
||||
@ -147,7 +153,7 @@ void dc_gamma_release(struct dc_gamma **gamma)
|
||||
|
||||
struct dc_gamma *dc_create_gamma(void)
|
||||
{
|
||||
struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
|
||||
struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);
|
||||
|
||||
if (gamma == NULL)
|
||||
goto alloc_fail;
|
||||
@ -167,7 +173,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf)
|
||||
static void dc_transfer_func_free(struct kref *kref)
|
||||
{
|
||||
struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
|
||||
kfree(tf);
|
||||
kvfree(tf);
|
||||
}
|
||||
|
||||
void dc_transfer_func_release(struct dc_transfer_func *tf)
|
||||
@ -175,9 +181,9 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
|
||||
kref_put(&tf->refcount, dc_transfer_func_free);
|
||||
}
|
||||
|
||||
struct dc_transfer_func *dc_create_transfer_func(void)
|
||||
struct dc_transfer_func *dc_create_transfer_func()
|
||||
{
|
||||
struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
|
||||
struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
|
||||
|
||||
if (tf == NULL)
|
||||
goto alloc_fail;
|
||||
|
@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"

#define DC_VER "3.1.38"
#define DC_VER "3.1.44"

#define MAX_SURFACES 3
#define MAX_STREAMS 6
@ -202,6 +202,7 @@ struct dc_debug {
bool timing_trace;
bool clock_trace;
bool validation_trace;
bool bandwidth_calcs_trace;

/* stutter efficiency related */
bool disable_stutter;
@ -332,20 +333,6 @@ enum {
TRANSFER_FUNC_POINTS = 1025
};

// Moved here from color module for linux
enum color_transfer_func {
transfer_func_unknown,
transfer_func_srgb,
transfer_func_bt709,
transfer_func_pq2084,
transfer_func_pq2084_interim,
transfer_func_linear_0_1,
transfer_func_linear_0_125,
transfer_func_dolbyvision,
transfer_func_gamma_22,
transfer_func_gamma_26
};

struct dc_hdr_static_metadata {
/* display chromaticities and white point in units of 0.00001 */
unsigned int chromaticity_green_x;
@ -361,9 +348,6 @@ struct dc_hdr_static_metadata {
uint32_t max_luminance;
uint32_t maximum_content_light_level;
uint32_t maximum_frame_average_light_level;

bool hdr_supported;
bool is_hdr;
};

enum dc_transfer_func_type {
@ -419,7 +403,6 @@ union surface_update_flags {
/* Medium updates */
uint32_t dcc_change:1;
uint32_t color_space_change:1;
uint32_t input_tf_change:1;
uint32_t horizontal_mirror_change:1;
uint32_t per_pixel_alpha_change:1;
uint32_t rotation_change:1;
@ -428,6 +411,7 @@ union surface_update_flags {
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
uint32_t coeff_reduction_change:1;
uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;

@ -460,7 +444,7 @@ struct dc_plane_state {
struct dc_gamma *gamma_correction;
struct dc_transfer_func *in_transfer_func;
struct dc_bias_and_scale *bias_and_scale;
struct csc_transform input_csc_color_matrix;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
uint32_t sdr_white_level;

@ -468,7 +452,6 @@ struct dc_plane_state {
struct dc_hdr_static_metadata hdr_static_ctx;

enum dc_color_space color_space;
enum color_transfer_func input_tf;

enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@ -498,7 +481,6 @@ struct dc_plane_info {
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
enum dc_color_space color_space;
enum color_transfer_func input_tf;
unsigned int sdr_white_level;
bool horizontal_mirror;
bool visible;
@ -525,10 +507,9 @@ struct dc_surface_update {
* null means no updates
*/
struct dc_gamma *gamma;
enum color_transfer_func color_input_tf;
struct dc_transfer_func *in_transfer_func;

struct csc_transform *input_csc_color_matrix;
struct dc_csc_transform *input_csc_color_matrix;
struct fixed31_32 *coeff_reduction_factor;
};

@ -699,6 +680,7 @@ struct dc_cursor {
struct dc_cursor_attributes attributes;
};

/*******************************************************************************
* Interrupt interfaces
******************************************************************************/

@ -117,6 +117,65 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}
/* note: a va_args version of this would be a bad idea, since the output
 * parameters are passed by pointer; the compiler would be unable to check
 * that the argument count and sizes match, making it prone to
 * stack-corruption bugs */
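The note above can be made concrete: with a variadic reader, nothing ties the declared field count to the arguments actually passed, so an off-by-one caller makes va_arg() read garbage and the final store scribbles through a bogus pointer. A hedged illustration of the hazard (hypothetical function, not DC code; the fixed-arity generic_reg_getN() variants exist precisely so the compiler checks every shift/mask/pointer triple):

#include <stdarg.h>
#include <stdint.h>

/*
 * Hypothetical variadic register reader, shown only to illustrate the
 * hazard. If a caller passes three triples but claims count == 4, the
 * fourth set of va_arg() calls reads past the argument list and the
 * write through 'field' corrupts memory -- and the compiler cannot
 * warn about any of it.
 */
static uint32_t unsafe_reg_get_va(uint32_t reg_val, int count, ...)
{
	va_list ap;
	int i;

	va_start(ap, count);
	for (i = 0; i < count; i++) {
		int shift = va_arg(ap, int);	/* uint8_t promotes to int */
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t *field = va_arg(ap, uint32_t *);

		*field = (reg_val & mask) >> shift;
	}
	va_end(ap);
	return reg_val;
}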
Some files were not shown because too many files have changed in this diff.