commit 0ecf4aa32b
Merge tag 'amd-drm-next-6.7-2023-10-20' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.7-2023-10-20:

amdgpu:
- SMU 13 updates
- UMSCH updates
- DC MPO fixes
- RAS updates
- MES 11 fixes
- Fix possible memory leaks in error paths
- GC 11.5 fixes
- Kernel doc updates
- PSP updates
- APU IMU fixes
- Misc code cleanups
- SMU 11 fixes
- OD fix
- Frame size warning fixes
- SR-IOV fixes
- NBIO 7.11 updates
- NBIO 7.7 updates
- XGMI fixes
- devcoredump updates

amdkfd:
- Misc code cleanups
- SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231020195043.4937-1-alexander.deucher@amd.com
@@ -773,6 +773,17 @@ struct amdgpu_mqd {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;

struct amdgpu_reset_info {
    /* reset dump register */
    u32 *reset_dump_reg_list;
    u32 *reset_dump_reg_value;
    int num_regs;

#ifdef CONFIG_DEV_COREDUMP
    struct amdgpu_coredump_info *coredump_info;
#endif
};

/*
 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
 */
@@ -1081,10 +1092,7 @@ struct amdgpu_device {

    struct mutex benchmark_mutex;

    /* reset dump register */
    uint32_t *reset_dump_reg_list;
    uint32_t *reset_dump_reg_value;
    int num_regs;
    struct amdgpu_reset_info reset_info;

    bool scpm_enabled;
    uint32_t scpm_status;
@@ -1111,15 +1119,6 @@ static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
    return adev->ip_versions[ip][inst] & ~0xFFU;
}

#ifdef CONFIG_DEV_COREDUMP
struct amdgpu_coredump_info {
    struct amdgpu_device *adev;
    struct amdgpu_task_info reset_task_info;
    struct timespec64 reset_time;
    bool reset_vram_lost;
};
#endif

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
    return container_of(ddev, struct amdgpu_device, ddev);
@@ -2016,8 +2016,8 @@ static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
    if (ret)
        return ret;

    for (i = 0; i < adev->num_regs; i++) {
        sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
    for (i = 0; i < adev->reset_info.num_regs; i++) {
        sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]);
        up_read(&adev->reset_domain->sem);
        if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
            return -EFAULT;
@@ -2074,9 +2074,9 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
    if (ret)
        goto error_free;

    swap(adev->reset_dump_reg_list, tmp);
    swap(adev->reset_dump_reg_value, new);
    adev->num_regs = i;
    swap(adev->reset_info.reset_dump_reg_list, tmp);
    swap(adev->reset_info.reset_dump_reg_value, new);
    adev->reset_info.num_regs = i;
    up_write(&adev->reset_domain->sem);
    ret = size;

@@ -32,8 +32,6 @@
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

@@ -3578,9 +3576,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
        if (adev->asic_reset_res)
            goto fail;

        if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
            adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
            adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
        amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
    } else {

        task_barrier_full(&hive->tb);
@@ -5050,91 +5046,17 @@ static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)

    lockdep_assert_held(&adev->reset_domain->sem);

    for (i = 0; i < adev->num_regs; i++) {
        adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
        trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
                                     adev->reset_dump_reg_value[i]);
    for (i = 0; i < adev->reset_info.num_regs; i++) {
        adev->reset_info.reset_dump_reg_value[i] =
            RREG32(adev->reset_info.reset_dump_reg_list[i]);

        trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
                                     adev->reset_info.reset_dump_reg_value[i]);
    }

    return 0;
}

#ifndef CONFIG_DEV_COREDUMP
static void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
                            struct amdgpu_reset_context *reset_context)
{
}
#else
static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
                                       size_t count, void *data, size_t datalen)
{
    struct drm_printer p;
    struct amdgpu_coredump_info *coredump = data;
    struct drm_print_iterator iter;
    int i;

    iter.data = buffer;
    iter.offset = 0;
    iter.start = offset;
    iter.remain = count;

    p = drm_coredump_printer(&iter);

    drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
    drm_printf(&p, "kernel: " UTS_RELEASE "\n");
    drm_printf(&p, "module: " KBUILD_MODNAME "\n");
    drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, coredump->reset_time.tv_nsec);
    if (coredump->reset_task_info.pid)
        drm_printf(&p, "process_name: %s PID: %d\n",
                   coredump->reset_task_info.process_name,
                   coredump->reset_task_info.pid);

    if (coredump->reset_vram_lost)
        drm_printf(&p, "VRAM is lost due to GPU reset!\n");
    if (coredump->adev->num_regs) {
        drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");

        for (i = 0; i < coredump->adev->num_regs; i++)
            drm_printf(&p, "0x%08x: 0x%08x\n",
                       coredump->adev->reset_dump_reg_list[i],
                       coredump->adev->reset_dump_reg_value[i]);
    }

    return count - iter.remain;
}

static void amdgpu_devcoredump_free(void *data)
{
    kfree(data);
}

static void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
                            struct amdgpu_reset_context *reset_context)
{
    struct amdgpu_coredump_info *coredump;
    struct drm_device *dev = adev_to_drm(adev);

    coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);

    if (!coredump) {
        DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
        return;
    }

    coredump->reset_vram_lost = vram_lost;

    if (reset_context->job && reset_context->job->vm)
        coredump->reset_task_info = reset_context->job->vm->task_info;

    coredump->adev = adev;

    ktime_get_ts64(&coredump->reset_time);

    dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
                  amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif

int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                         struct amdgpu_reset_context *reset_context)
{
@@ -5201,9 +5123,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,

    if (!r && amdgpu_ras_intr_triggered()) {
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
            if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
                tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
                tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
            amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
        }

        amdgpu_ras_intr_cleared();

@@ -29,6 +29,7 @@
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -501,6 +502,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
    struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
    struct amdgpu_ring *kiq_ring = &kiq->ring;
    struct amdgpu_hive_info *hive;
    struct amdgpu_ras *ras;
    int hive_ras_recovery = 0;
    int i, r = 0;
    int j;

@@ -521,6 +525,23 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
                           RESET_QUEUES, 0, 0);
    }

    /**
     * This is a workaround: only skip the kiq_ring test
     * during RAS recovery in the suspend stage for gfx9.4.3
     */
    hive = amdgpu_get_xgmi_hive(adev);
    if (hive) {
        hive_ras_recovery = atomic_read(&hive->ras_recovery);
        amdgpu_put_xgmi_hive(hive);
    }

    ras = amdgpu_ras_get_context(adev);
    if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
        ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) {
        spin_unlock(&kiq->ring_lock);
        return 0;
    }

    if (kiq_ring->sched.ready && !adev->job_hang)
        r = amdgpu_ring_test_helper(kiq_ring);
    spin_unlock(&kiq->ring_lock);

@@ -1267,6 +1267,8 @@ invoke:
    xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

    ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
    /* note down the capability flag for XGMI TA */
    psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

    return ret;
}
@@ -1388,7 +1390,7 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,

    /* Fill in the shared memory with topology information as input */
    topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
    xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
    xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
    topology_info_input->num_nodes = number_devices;

    for (i = 0; i < topology_info_input->num_nodes; i++) {
@@ -1399,7 +1401,7 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
    }

    /* Invoke xgmi ta to get the topology information */
    ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
    ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
    if (ret)
        return ret;

@@ -1424,28 +1426,53 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,

    /* Invoke xgmi ta again to get the link information */
    if (psp_xgmi_peer_link_info_supported(psp)) {
        struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output;
        struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
        struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
        bool requires_reflection =
            (psp->xgmi_context.supports_extended_data &&
             get_extended_data) ||
            amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
                IP_VERSION(13, 0, 6);
        bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps &
                                   EXTEND_PEER_LINK_INFO_CMD_FLAG;

        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
        /* populate the shared output buffer rather than the cmd input buffer
         * with node_ids as the input for GET_PEER_LINKS command execution.
         * This is required for GET_PEER_LINKS per xgmi ta implementation.
         * The same requirement for GET_EXTEND_PEER_LINKS command.
         */
        if (ta_port_num_support) {
            link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

        ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS);
            for (i = 0; i < topology->num_nodes; i++)
                link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

            link_extend_info_output->num_nodes = topology->num_nodes;
            xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
        } else {
            link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

            for (i = 0; i < topology->num_nodes; i++)
                link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

            link_info_output->num_nodes = topology->num_nodes;
            xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
        }

        ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
        if (ret)
            return ret;

        link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
        for (i = 0; i < topology->num_nodes; i++) {
            uint8_t node_num_links = ta_port_num_support ?
                link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
            /* accumulate num_links on extended data */
            topology->nodes[i].num_links = get_extended_data ?
                topology->nodes[i].num_links +
                link_info_output->nodes[i].num_links :
                ((requires_reflection && topology->nodes[i].num_links) ? topology->nodes[i].num_links :
                 link_info_output->nodes[i].num_links);
            if (get_extended_data) {
                topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
            } else {
                topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
                    topology->nodes[i].num_links : node_num_links;
            }

            /* reflect the topology information for bi-directionality */
            if (requires_reflection && topology->nodes[i].num_hops)

@@ -189,6 +189,7 @@ struct psp_xgmi_context {
    struct ta_context context;
    struct psp_xgmi_topology_info top_info;
    bool supports_extended_data;
    uint8_t xgmi_ta_caps;
};

struct psp_ras_context {

@@ -1038,7 +1038,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                 ras_mgr->err_data.ue_count, blk_name);
    else
        dev_info(adev->dev, "%ld correctable hardware errors detected in %s block\n",
                 ras_mgr->err_data.ue_count, blk_name);
                 ras_mgr->err_data.ce_count, blk_name);

    for_each_ras_error(err_node, err_data) {
        err_info = &err_node->err_info;
@@ -1055,7 +1055,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                     "%lld correctable hardware errors detected in %s block\n",
                     mcm_info->socket_id,
                     mcm_info->die_id,
                     err_info->ue_count,
                     err_info->ce_count,
                     blk_name);
        }
    }
@@ -1170,23 +1170,34 @@ out_fini_err_data:
    return ret;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
                                 enum amdgpu_ras_block block)
{
    struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

    if (!block_obj || !block_obj->hw_ops) {
        dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                     ras_block_str(block));
        return 0;
                     ras_block_str(block));
        return -EOPNOTSUPP;
    }

    if (!amdgpu_ras_is_supported(adev, block))
        return 0;
        return -EOPNOTSUPP;

    if (block_obj->hw_ops->reset_ras_error_count)
        block_obj->hw_ops->reset_ras_error_count(adev);

    return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                                  enum amdgpu_ras_block block)
{
    struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

    if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
        return 0;

    if ((block == AMDGPU_RAS_BLOCK__GFX) ||
        (block == AMDGPU_RAS_BLOCK__MMHUB)) {
        if (block_obj->hw_ops->reset_ras_error_status)
@@ -2140,9 +2151,11 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
    struct amdgpu_device *remote_adev = NULL;
    struct amdgpu_device *adev = ras->adev;
    struct list_head device_list, *device_list_handle = NULL;
    struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

    if (hive)
        atomic_set(&hive->ras_recovery, 1);
    if (!ras->disable_ras_err_cnt_harvest) {
        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

        /* Build list of devices to query RAS related errors */
        if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -2159,7 +2172,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
            amdgpu_ras_log_on_err_counter(remote_adev);
        }

        amdgpu_put_xgmi_hive(hive);
    }

    if (amdgpu_device_should_recover_gpu(ras->adev)) {
@@ -2194,6 +2206,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
        amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
    }
    atomic_set(&ras->in_recovery, 0);
    if (hive) {
        atomic_set(&hive->ras_recovery, 0);
        amdgpu_put_xgmi_hive(hive);
    }
}

/* alloc/realloc bps array */
@@ -2606,7 +2622,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
        if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
                IP_VERSION(2, 6, 0) ||
            amdgpu_ip_version(adev, VCN_HWIP, 0) ==
                IP_VERSION(4, 0, 0))
                IP_VERSION(4, 0, 0) ||
            amdgpu_ip_version(adev, VCN_HWIP, 0) ==
                IP_VERSION(4, 0, 3))
            adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
                                     1 << AMDGPU_RAS_BLOCK__JPEG);
        else
@@ -2635,18 +2653,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
    /* hw_supported needs to be aligned with RAS block mask. */
    adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;

    /*
     * Disable ras feature for aqua vanjaram
     * by default on apu platform.
     */
    if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) &&
        adev->gmc.is_app_apu)
        adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 :
            adev->ras_hw_enabled & amdgpu_ras_mask;
    else
        adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
            adev->ras_hw_enabled & amdgpu_ras_mask;
    adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
        adev->ras_hw_enabled & amdgpu_ras_mask;
}

static void amdgpu_ras_counte_dw(struct work_struct *work)
@@ -3303,6 +3311,27 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
    return 0;
}

void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
{
    struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

    if (con)
        con->is_mca_debug_mode = enable;
}

bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
{
    struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

    if (!con)
        return false;

    if (mca_funcs && mca_funcs->mca_set_debug_mode)
        return con->is_mca_debug_mode;
    else
        return true;
}

/* Register each ip ras block into amdgpu ras */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
@@ -3485,10 +3514,8 @@ void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
{
    struct ras_err_node *err_node, *tmp;

    list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node) {
    list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
        amdgpu_ras_error_node_release(err_node);
        list_del(&err_node->node);
    }
}

static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,

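The refactor above replaces the open-coded hw_ops guard chain at each call site with a single amdgpu_ras_reset_error_count() call. A minimal sketch of that pattern follows, using hypothetical cut-down types (hw_ops, ras_block, reset_error_count are simplified stand-ins here, not the real amdgpu structures):

#include <errno.h>
#include <stddef.h>

/* Hypothetical, reduced stand-ins for an amdgpu RAS block object. */
struct hw_ops {
    void (*reset_ras_error_count)(void *adev);
};

struct ras_block {
    const struct hw_ops *hw_ops;
};

/* One helper replaces the per-caller guard chain. -EOPNOTSUPP only signals
 * "no counter to reset here"; in the diff above, amdgpu_ras_reset_error_status()
 * maps that back to 0 so its callers keep seeing success. */
static int reset_error_count(void *adev, struct ras_block *blk)
{
    if (!blk || !blk->hw_ops)
        return -EOPNOTSUPP;

    if (blk->hw_ops->reset_ras_error_count)
        blk->hw_ops->reset_ras_error_count(adev);

    return 0;
}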
@@ -434,6 +434,8 @@ struct amdgpu_ras {

    /* Indicates smu whether need update bad channel info */
    bool update_channel_flag;
    /* Record status of smu mca debug mode */
    bool is_mca_debug_mode;

    /* Record special requirements of gpu reset caller */
    uint32_t gpu_reset_flags;
@@ -714,6 +716,8 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                                  struct ras_query_if *info);

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
                                 enum amdgpu_ras_block block);
int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                                  enum amdgpu_ras_block block);

@@ -766,6 +770,9 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev);

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);

void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev);

int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
                                  struct amdgpu_ras_block_object *ras_block_obj);
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev);

@@ -21,6 +21,9 @@
 *
 */

#include <linux/devcoredump.h>
#include <generated/utsrelease.h>

#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
@@ -159,5 +162,82 @@ void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
    up_write(&reset_domain->sem);
}

#ifndef CONFIG_DEV_COREDUMP
void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
                     struct amdgpu_reset_context *reset_context)
{
}
#else
static ssize_t
amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
                        void *data, size_t datalen)
{
    struct drm_printer p;
    struct amdgpu_coredump_info *coredump = data;
    struct drm_print_iterator iter;
    int i;

    iter.data = buffer;
    iter.offset = 0;
    iter.start = offset;
    iter.remain = count;

    p = drm_coredump_printer(&iter);

    drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
    drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
    drm_printf(&p, "kernel: " UTS_RELEASE "\n");
    drm_printf(&p, "module: " KBUILD_MODNAME "\n");
    drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
               coredump->reset_time.tv_nsec);

    if (coredump->reset_task_info.pid)
        drm_printf(&p, "process_name: %s PID: %d\n",
                   coredump->reset_task_info.process_name,
                   coredump->reset_task_info.pid);

    if (coredump->reset_vram_lost)
        drm_printf(&p, "VRAM is lost due to GPU reset!\n");
    if (coredump->adev->reset_info.num_regs) {
        drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");

        for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
            drm_printf(&p, "0x%08x: 0x%08x\n",
                       coredump->adev->reset_info.reset_dump_reg_list[i],
                       coredump->adev->reset_info.reset_dump_reg_value[i]);
    }

    return count - iter.remain;
}

static void amdgpu_devcoredump_free(void *data)
{
    kfree(data);
}

void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
                     struct amdgpu_reset_context *reset_context)
{
    struct amdgpu_coredump_info *coredump;
    struct drm_device *dev = adev_to_drm(adev);

    coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);

    if (!coredump) {
        DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
        return;
    }

    coredump->reset_vram_lost = vram_lost;

    if (reset_context->job && reset_context->job->vm)
        coredump->reset_task_info = reset_context->job->vm->task_info;

    coredump->adev = adev;

    ktime_get_ts64(&coredump->reset_time);

    dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
                  amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif
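amdgpu_coredump() above is built on the kernel's devcoredump facility: allocate a snapshot, hand it to dev_coredumpm() with a read and a free callback, and the core exposes it to userspace until it is read or times out. A minimal sketch of the same pattern, where my_dump, my_dump_read, and my_report_crash are hypothetical driver names rather than amdgpu code:

#include <linux/devcoredump.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical driver state snapshot. */
struct my_dump {
    int reason;
};

static ssize_t my_dump_read(char *buffer, loff_t offset, size_t count,
                            void *data, size_t datalen)
{
    struct my_dump *dump = data;

    /* Copy out of the snapshot; devcoredump calls this repeatedly until
     * the whole dump has been read. datalen is whatever was passed to
     * dev_coredumpm() (0 here), so we size from the snapshot itself. */
    return memory_read_from_buffer(buffer, count, &offset,
                                   dump, sizeof(*dump));
}

static void my_dump_free(void *data)
{
    kfree(data);
}

/* Called from an error path; dev is the failing device. GFP_NOWAIT is used
 * because reset paths must not sleep waiting for memory. */
static void my_report_crash(struct device *dev, int reason)
{
    struct my_dump *dump = kzalloc(sizeof(*dump), GFP_NOWAIT);

    if (!dump)
        return;

    dump->reason = reason;
    /* devcoredump now owns the buffer; my_dump_free() runs once userspace
     * has read /sys/class/devcoredump/devcdN/data or the timeout expires. */
    dev_coredumpm(dev, THIS_MODULE, dump, 0, GFP_NOWAIT,
                  my_dump_read, my_dump_free);
}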
@@ -89,6 +89,17 @@ struct amdgpu_reset_domain {
    atomic_t reset_res;
};

#ifdef CONFIG_DEV_COREDUMP

#define AMDGPU_COREDUMP_VERSION "1"

struct amdgpu_coredump_info {
    struct amdgpu_device *adev;
    struct amdgpu_task_info reset_task_info;
    struct timespec64 reset_time;
    bool reset_vram_lost;
};
#endif

int amdgpu_reset_init(struct amdgpu_device *adev);
int amdgpu_reset_fini(struct amdgpu_device *adev);
@@ -130,6 +141,9 @@ void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);

void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);

void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
                     struct amdgpu_reset_context *reset_context);

#define for_each_handler(i, handler, reset_ctl)                  \
    for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) &&               \
                (handler = (*reset_ctl->reset_handlers)[i]);     \

@@ -843,6 +843,20 @@ static int umsch_mm_hw_fini(void *handle)
    return 0;
}

static int umsch_mm_suspend(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return umsch_mm_hw_fini(adev);
}

static int umsch_mm_resume(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return umsch_mm_hw_init(adev);
}

static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
    .name = "umsch_mm_v4_0",
    .early_init = umsch_mm_early_init,
@@ -851,6 +865,8 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
    .sw_fini = umsch_mm_sw_fini,
    .hw_init = umsch_mm_hw_init,
    .hw_fini = umsch_mm_hw_fini,
    .suspend = umsch_mm_suspend,
    .resume = umsch_mm_resume,
};

const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {

@@ -169,6 +169,9 @@
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
#define AMDGPU_VCN_VF_RB_DECOUPLE_FLAG (1 << 15)

#define MAX_NUM_VCN_RB_SETUP 4

#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -335,15 +338,30 @@ struct amdgpu_fw_shared {
    struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};

struct amdgpu_vcn_rb_setup_info {
    uint32_t rb_addr_lo;
    uint32_t rb_addr_hi;
    uint32_t rb_size;
};

struct amdgpu_fw_shared_rb_setup {
    uint32_t is_rb_enabled_flags;
    uint32_t rb_addr_lo;
    uint32_t rb_addr_hi;
    uint32_t rb_size;
    uint32_t rb4_addr_lo;
    uint32_t rb4_addr_hi;
    uint32_t rb4_size;
    uint32_t reserved[6];

    union {
        struct {
            uint32_t rb_addr_lo;
            uint32_t rb_addr_hi;
            uint32_t rb_size;
            uint32_t rb4_addr_lo;
            uint32_t rb4_addr_hi;
            uint32_t rb4_size;
            uint32_t reserved[6];
        };

        struct {
            struct amdgpu_vcn_rb_setup_info rb_info[MAX_NUM_VCN_RB_SETUP];
        };
    };
};

struct amdgpu_fw_shared_drm_key_wa {
@@ -351,6 +369,11 @@ struct amdgpu_fw_shared_drm_key_wa {
    uint8_t reserved[3];
};

struct amdgpu_fw_shared_queue_decouple {
    uint8_t is_enabled;
    uint8_t reserved[7];
};

struct amdgpu_vcn4_fw_shared {
    uint32_t present_flag_0;
    uint8_t pad[12];
@@ -361,6 +384,8 @@ struct amdgpu_vcn4_fw_shared {
    struct amdgpu_fw_shared_rb_setup rb_setup;
    struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
    struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
    uint8_t pad3[9];
    struct amdgpu_fw_shared_queue_decouple decouple;
};

struct amdgpu_vcn_fwlog {
@@ -378,6 +403,15 @@ struct amdgpu_vcn_decode_buffer {
    uint32_t pad[30];
};

struct amdgpu_vcn_rb_metadata {
    uint32_t size;
    uint32_t present_flag_0;

    uint8_t version;
    uint8_t ring_id;
    uint8_t pad[26];
};

#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0
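The amdgpu_fw_shared_rb_setup change above wraps the legacy named fields and the new rb_info[] array in an anonymous union, so both views alias the same 48 bytes of VCN shared memory and firmware that still writes the old fields is untouched. A cut-down, compilable sketch of why the layout is ABI-safe (rb_setup_layout is a hypothetical reduced copy, not the kernel struct):

#include <stdint.h>

#define MAX_NUM_VCN_RB_SETUP 4

struct amdgpu_vcn_rb_setup_info {
    uint32_t rb_addr_lo;
    uint32_t rb_addr_hi;
    uint32_t rb_size;
};

/* Reduced copy of the new layout: 6 named dwords + reserved[6] on one side,
 * 4 x 3-dword rb_info entries on the other -- both views are 12 dwords. */
struct rb_setup_layout {
    uint32_t is_rb_enabled_flags;
    union {
        struct {
            uint32_t rb_addr_lo;
            uint32_t rb_addr_hi;
            uint32_t rb_size;
            uint32_t rb4_addr_lo;
            uint32_t rb4_addr_hi;
            uint32_t rb4_size;
            uint32_t reserved[6];
        };
        struct amdgpu_vcn_rb_setup_info rb_info[MAX_NUM_VCN_RB_SETUP];
    };
};

/* 1 flag dword + 12 union dwords: the union must not grow the shared struct. */
_Static_assert(sizeof(struct rb_setup_layout) == 13 * sizeof(uint32_t),
               "rb_setup layout changed size; shared-memory ABI broken");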
@@ -126,6 +126,8 @@ enum AMDGIM_FEATURE_FLAG {
    AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
    /* AV1 Support MODE*/
    AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
    /* VCN RB decouple */
    AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
};

enum AMDGIM_REG_ACCESS_FLAG {
@@ -326,6 +328,8 @@ static inline bool is_virtual_machine(void)
    ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
#define amdgpu_sriov_is_av1_support(adev) \
    ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
#define amdgpu_sriov_is_vcn_rb_decouple(adev) \
    ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,

@@ -908,7 +908,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
        adev->gmc.xgmi.num_physical_nodes == 0)
        return 0;

    adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
    amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

    return amdgpu_ras_block_late_init(adev, ras_block);
}
@@ -1075,7 +1075,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
        break;
    }

    adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
    amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

    err_data->ue_count += ue_cnt;
    err_data->ce_count += ce_cnt;

@@ -44,6 +44,7 @@ struct amdgpu_hive_info {

    struct amdgpu_reset_domain *reset_domain;
    uint32_t device_remove_count;
    atomic_t ras_recovery;
};

struct amdgpu_pcs_ras_field {

@@ -90,10 +90,11 @@ union amd_sriov_msg_feature_flags {
        uint32_t host_load_ucodes  : 1;
        uint32_t host_flr_vramlost : 1;
        uint32_t mm_bw_management  : 1;
        uint32_t pp_one_vf_mode    : 1;
        uint32_t reg_indirect_acc  : 1;
        uint32_t av1_support       : 1;
        uint32_t reserved          : 25;
        uint32_t vcn_rb_decouple   : 1;
        uint32_t reserved          : 24;
    } flags;
    uint32_t all;
};
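In the amd_sriov_msg_feature_flags change above, the new vcn_rb_decouple bit is paid for by shrinking reserved from 25 to 24 bits, keeping the GIM-to-VF message word at exactly 32 bits. A reduced sketch of that invariant; only the tail of the real bitfield is reproduced, so the reserved width here (28) intentionally differs from the kernel's:

#include <stdint.h>

/* Hypothetical cut-down copy of the feature-flags union. */
union feature_flags {
    struct {
        uint32_t pp_one_vf_mode   : 1;
        uint32_t reg_indirect_acc : 1;
        uint32_t av1_support      : 1;
        uint32_t vcn_rb_decouple  : 1; /* new bit, taken from reserved */
        uint32_t reserved         : 28;
    } flags;
    uint32_t all;
};

/* Any new flag must steal a bit from reserved so the dword never grows. */
_Static_assert(sizeof(union feature_flags) == sizeof(uint32_t),
               "GIM<->VF feature-flags message must stay one dword");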
@@ -589,6 +589,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.mec2_fw = NULL;

    gfx_v11_0_check_fw_cp_gfx_shadow(adev);

    if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
        err = adev->gfx.imu.funcs->init_microcode(adev);
        if (err)
            DRM_ERROR("Failed to init imu firmware!\n");
        return err;
    }

out:
    if (err) {
        amdgpu_ucode_release(&adev->gfx.pfp_fw);
@@ -1395,14 +1403,6 @@ static int gfx_v11_0_sw_init(void *handle)

    adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

    if (adev->gfx.imu.funcs) {
        if (adev->gfx.imu.funcs->init_microcode) {
            r = adev->gfx.imu.funcs->init_microcode(adev);
            if (r)
                DRM_ERROR("Failed to load imu firmware!\n");
        }
    }

    gfx_v11_0_me_init(adev);

    r = gfx_v11_0_rlc_init(adev);
@@ -4373,6 +4373,10 @@ static int gfx_v11_0_hw_init(void *handle)
    if (r)
        return r;

    /* get IMU version from HW if it's not set */
    if (!adev->gfx.imu_fw_version)
        adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);

    return r;
}

@@ -5170,45 +5174,17 @@ static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    uint32_t *wptr_saved;
    uint32_t *is_queue_unmap;
    uint64_t aggregated_db_index;
    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
    uint64_t wptr_tmp;

    if (ring->is_mes_queue) {
        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
                                      sizeof(uint32_t));
        aggregated_db_index =
            amdgpu_mes_get_aggregated_doorbell_index(adev,
                                                     ring->hw_prio);

        wptr_tmp = ring->wptr & ring->buf_mask;
        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
        *wptr_saved = wptr_tmp;
        /* assume doorbell always being used by mes mapped queue */
        if (*is_queue_unmap) {
            WDOORBELL64(aggregated_db_index, wptr_tmp);
            WDOORBELL64(ring->doorbell_index, wptr_tmp);
        } else {
            WDOORBELL64(ring->doorbell_index, wptr_tmp);

            if (*is_queue_unmap)
                WDOORBELL64(aggregated_db_index, wptr_tmp);
        }
    if (ring->use_doorbell) {
        /* XXX check if swapping is necessary on BE */
        atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                     ring->wptr);
        WDOORBELL64(ring->doorbell_index, ring->wptr);
    } else {
        if (ring->use_doorbell) {
            /* XXX check if swapping is necessary on BE */
            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                         ring->wptr);
            WDOORBELL64(ring->doorbell_index, ring->wptr);
        } else {
            WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
                         lower_32_bits(ring->wptr));
            WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
                         upper_32_bits(ring->wptr));
        }
        WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
                     lower_32_bits(ring->wptr));
        WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
                     upper_32_bits(ring->wptr));
    }
}

@@ -5233,42 +5209,14 @@ static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    uint32_t *wptr_saved;
    uint32_t *is_queue_unmap;
    uint64_t aggregated_db_index;
    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
    uint64_t wptr_tmp;

    if (ring->is_mes_queue) {
        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
                                      sizeof(uint32_t));
        aggregated_db_index =
            amdgpu_mes_get_aggregated_doorbell_index(adev,
                                                     ring->hw_prio);

        wptr_tmp = ring->wptr & ring->buf_mask;
        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
        *wptr_saved = wptr_tmp;
        /* assume doorbell always used by mes mapped queue */
        if (*is_queue_unmap) {
            WDOORBELL64(aggregated_db_index, wptr_tmp);
            WDOORBELL64(ring->doorbell_index, wptr_tmp);
        } else {
            WDOORBELL64(ring->doorbell_index, wptr_tmp);

            if (*is_queue_unmap)
                WDOORBELL64(aggregated_db_index, wptr_tmp);
        }
    /* XXX check if swapping is necessary on BE */
    if (ring->use_doorbell) {
        atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                     ring->wptr);
        WDOORBELL64(ring->doorbell_index, ring->wptr);
    } else {
        /* XXX check if swapping is necessary on BE */
        if (ring->use_doorbell) {
            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                         ring->wptr);
            WDOORBELL64(ring->doorbell_index, ring->wptr);
        } else {
            BUG(); /* only DOORBELL method supported on gfx11 now */
        }
        BUG(); /* only DOORBELL method supported on gfx11 now */
    }
}

@@ -623,7 +623,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
                                               int num_xccs_per_xcp)
{
    int ret, i, num_xcc;
    u32 tmp = 0, regval;
    u32 tmp = 0;

    if (adev->psp.funcs) {
        ret = psp_spatial_partition(&adev->psp,
@@ -631,24 +631,23 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
                                    num_xccs_per_xcp);
        if (ret)
            return ret;
    }
    } else {
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);

    for (i = 0; i < num_xcc; i++) {
        tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
                            num_xccs_per_xcp);
        tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
                            i % num_xccs_per_xcp);
        regval = RREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL);
        if (regval != tmp)
        for (i = 0; i < num_xcc; i++) {
            tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
                                num_xccs_per_xcp);
            tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
                                i % num_xccs_per_xcp);
            WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
                         tmp);
        }
        ret = 0;
    }

    adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

    return 0;
    return ret;
}

static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
@@ -3755,10 +3754,6 @@ static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
     AMDGPU_GFX_LDS_MEM, 4},
};

static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = {
    SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16
};

static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
                                                void *ras_error_status, int xcc_id)
{
@@ -3847,39 +3842,6 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
    mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev,
                                                int xcc_id)
{
    uint32_t i, j;
    uint32_t reg_value;

    mutex_lock(&adev->grbm_idx_mutex);

    for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
        for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
            gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
            reg_value = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
                                     regGCEA_ERR_STATUS);
            if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
                REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
                REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
                dev_warn(adev->dev,
                         "GCEA err detected at instance: %d, status: 0x%x!\n",
                         j, reg_value);
            }
            /* clear after read */
            reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
                                      CLEAR_ERROR_STATUS, 0x1);
            WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS,
                         reg_value);
        }
    }

    gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
                                xcc_id);
    mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev,
                                                 int xcc_id)
{
@@ -3984,7 +3946,6 @@ static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev,
static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev,
                                                 void *ras_error_status, int xcc_id)
{
    gfx_v9_4_3_inst_query_ea_err_status(adev, xcc_id);
    gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id);
    gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id);
}
@@ -3997,27 +3958,6 @@ static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev,
    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3);
}

static void gfx_v9_4_3_inst_reset_ea_err_status(struct amdgpu_device *adev,
                                                int xcc_id)
{
    uint32_t i, j;
    uint32_t value;

    mutex_lock(&adev->grbm_idx_mutex);
    for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
        for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
            gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
            value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS);
            value = REG_SET_FIELD(value, GCEA_ERR_STATUS,
                                  CLEAR_ERROR_STATUS, 0x1);
            WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, value);
        }
    }
    gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
                                xcc_id);
    mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev,
                                                    int xcc_id)
{
@@ -4043,7 +3983,6 @@ static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
                                                 void *ras_error_status, int xcc_id)
{
    gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id);
    gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id);
    gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
}

@@ -1587,13 +1587,8 @@ static int gmc_v9_0_late_init(void *handle)
    }

    if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
        if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
            adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
            adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);

        if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
            adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
            adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
        amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
        amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
    }

    r = amdgpu_gmc_ras_late_init(adev);

@@ -55,7 +55,6 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
    if (err)
        goto out;
    imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
    adev->gfx.imu_fw_version = le32_to_cpu(imu_hdr->header.ucode_version);
    //adev->gfx.imu_feature_version = le32_to_cpu(imu_hdr->ucode_feature_version);

    if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -69,7 +68,8 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
        info->fw = adev->gfx.imu_fw;
        adev->firmware.fw_size +=
            ALIGN(le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes), PAGE_SIZE);
    }
    } else
        adev->gfx.imu_fw_version = le32_to_cpu(imu_hdr->header.ucode_version);

out:
    if (err) {

@@ -414,60 +414,6 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
                                 offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}

static void mes_v11_0_init_aggregated_doorbell(struct amdgpu_mes *mes)
{
    struct amdgpu_device *adev = mes->adev;
    uint32_t data;

    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1);
    data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
              CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
              CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
            CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1, data);

    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2);
    data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
              CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
              CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
            CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2, data);

    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3);
    data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
              CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
              CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
            CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3, data);

    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4);
    data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK |
              CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK |
              CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] <<
            CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4, data);

    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5);
    data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK |
              CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK |
              CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] <<
            CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5, data);

    data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_HQD_GFX_CONTROL, data);
}

static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
    .add_hw_queue = mes_v11_0_add_hw_queue,
    .remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -1243,8 +1189,6 @@ static int mes_v11_0_hw_init(void *handle)
    if (r)
        goto failure;

    mes_v11_0_init_aggregated_doorbell(&adev->mes);

    r = mes_v11_0_query_sched_status(&adev->mes);
    if (r) {
        DRM_ERROR("MES is busy\n");

@@ -700,152 +700,9 @@ static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev)
        mmhub_v1_8_inst_reset_ras_error_count(adev, i);
}

static const u32 mmhub_v1_8_mmea_err_status_reg[] __maybe_unused = {
    regMMEA0_ERR_STATUS,
    regMMEA1_ERR_STATUS,
    regMMEA2_ERR_STATUS,
    regMMEA3_ERR_STATUS,
    regMMEA4_ERR_STATUS,
};

static void mmhub_v1_8_inst_query_ras_err_status(struct amdgpu_device *adev,
                                                 uint32_t mmhub_inst)
{
    uint32_t reg_value;
    uint32_t mmea_err_status_addr_dist;
    uint32_t i;

    /* query mmea ras err status */
    mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
    for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
        reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
                                        regMMEA0_ERR_STATUS,
                                        i * mmea_err_status_addr_dist);
        if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
            REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
            REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
            dev_warn(adev->dev,
                     "Detected MMEA%d err in MMHUB%d, status: 0x%x\n",
                     i, mmhub_inst, reg_value);
        }
    }

    /* query mm_cane ras err status */
    reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
    if (REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_STATUS) ||
        REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_WRRSP_STATUS) ||
        REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_DATAPARITY_ERROR)) {
        dev_warn(adev->dev,
                 "Detected MM CANE err in MMHUB%d, status: 0x%x\n",
                 mmhub_inst, reg_value);
    }
}

static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev)
{
    uint32_t inst_mask;
    uint32_t i;

    if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
        dev_warn(adev->dev, "MMHUB RAS is not supported\n");
        return;
    }

    inst_mask = adev->aid_mask;
    for_each_inst(i, inst_mask)
        mmhub_v1_8_inst_query_ras_err_status(adev, i);
}

static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev,
                                                 uint32_t mmhub_inst)
{
    uint32_t mmea_cgtt_clk_cntl_addr_dist;
    uint32_t mmea_err_status_addr_dist;
    uint32_t reg_value;
    uint32_t i;

    /* reset mmea ras err status */
    mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL;
    mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
    for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
        /* force clk branch on for response path
         * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1
         */
        reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
                                        regMMEA0_CGTT_CLK_CTRL,
                                        i * mmea_cgtt_clk_cntl_addr_dist);
        reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
                                  SOFT_OVERRIDE_RETURN, 1);
        WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
                            regMMEA0_CGTT_CLK_CTRL,
                            i * mmea_cgtt_clk_cntl_addr_dist,
                            reg_value);

        /* set MMEA0_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
        reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
                                        regMMEA0_ERR_STATUS,
                                        i * mmea_err_status_addr_dist);
        reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS,
                                  CLEAR_ERROR_STATUS, 1);
        WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
                            regMMEA0_ERR_STATUS,
                            i * mmea_err_status_addr_dist,
                            reg_value);

        /* set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 0 */
        reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
                                        regMMEA0_CGTT_CLK_CTRL,
                                        i * mmea_cgtt_clk_cntl_addr_dist);
        reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
                                  SOFT_OVERRIDE_RETURN, 0);
        WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
                            regMMEA0_CGTT_CLK_CTRL,
                            i * mmea_cgtt_clk_cntl_addr_dist,
                            reg_value);
    }

    /* reset mm_cane ras err status
     * force clk branch on for response path
     * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1
     */
    reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
    reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
                              SOFT_OVERRIDE_ATRET, 1);
    WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);

    /* set MM_CANE_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
    reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
    reg_value = REG_SET_FIELD(reg_value, MM_CANE_ERR_STATUS,
                              CLEAR_ERROR_STATUS, 1);
    WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS, reg_value);

    /* set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 0 */
    reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
    reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
                              SOFT_OVERRIDE_ATRET, 0);
    WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);
}

static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev)
{
    uint32_t inst_mask;
    uint32_t i;

    if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
        dev_warn(adev->dev, "MMHUB RAS is not supported\n");
        return;
    }

    inst_mask = adev->aid_mask;
    for_each_inst(i, inst_mask)
        mmhub_v1_8_inst_reset_ras_err_status(adev, i);
}

static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
    .query_ras_error_count = mmhub_v1_8_query_ras_error_count,
    .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
    .query_ras_error_status = mmhub_v1_8_query_ras_error_status,
    .reset_ras_error_status = mmhub_v1_8_reset_ras_error_status,
};

struct amdgpu_mmhub_ras mmhub_v1_8_ras = {

@ -272,6 +272,81 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
|
||||
*/
|
||||
}
|
||||
|
||||
static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,
|
||||
bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		return;

	def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL);
	if (enable) {
		data |= (BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL, data);
}

static void nbio_v7_11_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		return;

	def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2);
	if (enable)
		data |= BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
	else
		data &= ~BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;

	if (def != data)
		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2, data);

	def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1);
	if (enable) {
		data |= (BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
			 BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
			  BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1, data);
}

static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
					     u64 *flags)
{
	uint32_t data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL);
	if (data & BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2);
	if (data & BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

const struct amdgpu_nbio_funcs nbio_v7_11_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_11_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_11_get_hdp_flush_done_offset,
@ -288,6 +363,9 @@ const struct amdgpu_nbio_funcs nbio_v7_11_funcs = {
	.enable_doorbell_aperture = nbio_v7_11_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_11_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_11_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v7_11_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_11_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_11_get_clockgating_state,
	.ih_control = nbio_v7_11_ih_control,
	.init_registers = nbio_v7_11_init_registers,
	.remap_hdp_registers = nbio_v7_11_remap_hdp_registers,

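The three NBIO 7.11 functions above share one read-modify-write shape: snapshot the register, flip the gate-enable mask bits, and write back only when the value actually changed, so an already-programmed register is never touched. A minimal stand-alone sketch of that pattern — the masks are invented and a fake register file stands in for RREG32_SOC15/WREG32_SOC15:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical bit layout; the real masks live in the NBIO register headers. */
#define LCLK_DYN_GATE_ENABLE_MASK  (1u << 0)
#define TXCLK_DYN_GATE_ENABLE_MASK (1u << 1)

static uint32_t regs[16];                                   /* fake register file */
static uint32_t mmio_read(unsigned int reg) { return regs[reg]; }
static void mmio_write(unsigned int reg, uint32_t val) { regs[reg] = val; }

static void update_clock_gating(unsigned int reg, bool enable)
{
	uint32_t def, data;

	def = data = mmio_read(reg);            /* snapshot current state */
	if (enable)
		data |= LCLK_DYN_GATE_ENABLE_MASK | TXCLK_DYN_GATE_ENABLE_MASK;
	else
		data &= ~(LCLK_DYN_GATE_ENABLE_MASK | TXCLK_DYN_GATE_ENABLE_MASK);

	if (def != data)                        /* write back only on change */
		mmio_write(reg, data);
}
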
@ -168,7 +168,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
	 * If there is an error in processing command, bits[7:0] will be set.
	 * This is applicable for PSP v13.0.6 and newer.
	 */
	for (retry_loop = 0; retry_loop < 10; retry_loop++) {
	for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
		ret = psp_wait_for(
			psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
			0x80000000, 0xffffffff, false);

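The hunk swaps the magic retry count for the named PSP_VMBX_POLLING_LIMIT; the surrounding structure is a bounded poll on a status register. A self-contained sketch of the idiom, with an assumed limit value and a fake status read (not the psp_wait_for() API):

#include <errno.h>
#include <stdint.h>

#define VMBX_POLLING_LIMIT 20          /* assumed value for the sketch */
#define STATUS_READY_BIT   0x80000000u

/* Fake status source: reports ready after a few polls. */
static uint32_t read_status(void)
{
	static int n;
	return ++n > 3 ? STATUS_READY_BIT : 0;
}

static int wait_for_ready(void)
{
	for (int retry = 0; retry < VMBX_POLLING_LIMIT; retry++) {
		if (read_status() & STATUS_READY_BIT)
			return 0;              /* bootloader signalled ready */
	}
	return -ETIME;                         /* bounded: never spins forever */
}
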
@ -1749,11 +1749,8 @@ static int sdma_v4_0_late_init(void *handle)

	sdma_v4_0_setup_ulv(adev);

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
		    adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
	}
	if (!amdgpu_persistent_edc_harvesting_supported(adev))
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);

	return 0;
}

@ -1276,11 +1276,8 @@ static int sdma_v4_4_2_late_init(void *handle)
		.cb = sdma_v4_4_2_process_ras_data_cb,
	};
#endif
	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
		    adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
	}
	if (!amdgpu_persistent_edc_harvesting_supported(adev))
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);

	return 0;
}

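Both SDMA hunks collapse the open-coded NULL chain (ras, then hw_ops, then reset_ras_error_count) into a single amdgpu_ras_reset_error_count() call. A generic sketch of the underlying "optional ops table behind a helper" pattern, with all names invented:

#include <stddef.h>

struct ras_hw_ops {
	void (*reset_error_count)(void *dev);   /* optional callback */
};

struct ras_block {
	struct ras_hw_ops *hw_ops;
};

/* The helper owns the NULL checks, so every caller shrinks to one line. */
static void ras_reset_error_count(void *dev, struct ras_block *blk)
{
	if (blk && blk->hw_ops && blk->hw_ops->reset_error_count)
		blk->hw_ops->reset_error_count(dev);
}
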
@ -156,68 +156,35 @@ static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;

	DRM_DEBUG("Setting write pointer\n");

	if (ring->is_mes_queue) {
		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
		aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);

		if (ring->use_doorbell) {
			DRM_DEBUG("Using doorbell -- "
				  "wptr_offs == 0x%08x "
				  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
				  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
				  ring->wptr_offs,
				  lower_32_bits(ring->wptr << 2),
				  upper_32_bits(ring->wptr << 2));
			/* XXX check if swapping is necessary on BE */
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr << 2);
			*wptr_saved = ring->wptr << 2;
			if (*is_queue_unmap) {
				WDOORBELL64(aggregated_db_index, ring->wptr << 2);
				DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
					  ring->doorbell_index, ring->wptr << 2);
				WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
			} else {
				DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
					  ring->doorbell_index, ring->wptr << 2);
				WDOORBELL64(ring->doorbell_index, ring->wptr << 2);

				if (*is_queue_unmap)
					WDOORBELL64(aggregated_db_index,
						    ring->wptr << 2);
			}
			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
				  ring->doorbell_index, ring->wptr << 2);
			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
		} else {
		if (ring->use_doorbell) {
			DRM_DEBUG("Using doorbell -- "
				  "wptr_offs == 0x%08x "
				  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
				  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
				  ring->wptr_offs,
				  lower_32_bits(ring->wptr << 2),
				  upper_32_bits(ring->wptr << 2));
			/* XXX check if swapping is necessary on BE */
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr << 2);
			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
				  ring->doorbell_index, ring->wptr << 2);
			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
		} else {
			DRM_DEBUG("Not using doorbell -- "
				  "regSDMA%i_GFX_RB_WPTR == 0x%08x "
				  "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
				  ring->me,
				  lower_32_bits(ring->wptr << 2),
				  ring->me,
				  upper_32_bits(ring->wptr << 2));
			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
					ring->me, regSDMA0_QUEUE0_RB_WPTR),
					lower_32_bits(ring->wptr << 2));
			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
					ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
					upper_32_bits(ring->wptr << 2));
		}
		DRM_DEBUG("Not using doorbell -- "
			  "regSDMA%i_GFX_RB_WPTR == 0x%08x "
			  "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
			  ring->me,
			  lower_32_bits(ring->wptr << 2),
			  ring->me,
			  upper_32_bits(ring->wptr << 2));
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
				ring->me, regSDMA0_QUEUE0_RB_WPTR),
				lower_32_bits(ring->wptr << 2));
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
				ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
				upper_32_bits(ring->wptr << 2));
	}
}

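In the reworked MES path above, the new write pointer is first published to the CPU-visible shadow (*wptr_saved, plus the atomic64 copy) and only then is a doorbell rung, with the queue's own doorbell now written before the aggregated one. A toy sketch of that publish-then-ring ordering — fake doorbell and shadow, not the amdgpu API:

#include <stdint.h>

static volatile uint64_t doorbell;      /* stands in for the MMIO doorbell page */
static uint64_t wptr_shadow;            /* CPU-visible copy the firmware reads back */

static void ring_doorbell(uint64_t wptr)
{
	/* 1) publish the new write pointer where the scheduler can see it */
	__atomic_store_n(&wptr_shadow, wptr << 2, __ATOMIC_RELEASE);

	/* 2) then ring the doorbell exactly once with the same value */
	doorbell = wptr << 2;
}
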
@ -863,6 +863,8 @@ static int soc21_common_set_clockgating_state(void *handle,
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
	case IP_VERSION(7, 11, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,

@ -1,5 +1,5 @@
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@ -20,7 +20,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _TA_XGMI_IF_H
#define _TA_XGMI_IF_H

@ -28,20 +27,31 @@
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)

#define EXTEND_PEER_LINK_INFO_CMD_FLAG	1

enum ta_command_xgmi {
	/* Initialize the Context and Session Topology */
	TA_COMMAND_XGMI__INITIALIZE = 0x00,
	/* Gets the current GPU's node ID */
	TA_COMMAND_XGMI__GET_NODE_ID = 0x01,
	/* Gets the current GPU's hive ID */
	TA_COMMAND_XGMI__GET_HIVE_ID = 0x02,
	TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03,
	/* Gets the Peer's topology Information */
	TA_COMMAND_XGMI__GET_TOPOLOGY_INFO = 0x03,
	/* Sets the Peer's topology Information */
	TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04,
	TA_COMMAND_XGMI__GET_PEER_LINKS = 0x0B
	/* Gets the total links between adjacent peer dies in hive */
	TA_COMMAND_XGMI__GET_PEER_LINKS = 0x0B,
	/* Gets the total links and connected port numbers between adjacent peer dies in hive */
	TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS = 0x0C
};

/* XGMI related enumerations */
/**********************************************************/
enum ta_xgmi_connected_nodes {
	TA_XGMI__MAX_CONNECTED_NODES = 64
};
enum { TA_XGMI__MAX_CONNECTED_NODES = 64 };
enum { TA_XGMI__MAX_INTERNAL_STATE = 32 };
enum { TA_XGMI__MAX_INTERNAL_STATE_BUFFER = 128 };
enum { TA_XGMI__MAX_PORT_NUM = 8 };

enum ta_xgmi_status {
	TA_XGMI_STATUS__SUCCESS = 0x00,
@ -81,6 +91,18 @@ struct ta_xgmi_peer_link_info {
	uint8_t num_links;
};

struct xgmi_connected_port_num {
	uint8_t dst_xgmi_port_num;
	uint8_t src_xgmi_port_num;
};

/* support both the port num and num_links */
struct ta_xgmi_extend_peer_link_info {
	uint64_t node_id;
	uint8_t num_links;
	struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM];
};

struct ta_xgmi_cmd_initialize_output {
	uint32_t status;
};
@ -103,16 +125,21 @@ struct ta_xgmi_cmd_get_topology_info_output {
	struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
};

struct ta_xgmi_cmd_get_peer_link_info_output {
	uint32_t num_nodes;
	struct ta_xgmi_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
};

struct ta_xgmi_cmd_set_topology_info_input {
	uint32_t num_nodes;
	struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
};

/* support XGMI TA w/ and w/o port_num both so two similar structs defined */
struct ta_xgmi_cmd_get_peer_link_info {
	uint32_t num_nodes;
	struct ta_xgmi_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
};

struct ta_xgmi_cmd_get_extend_peer_link_info {
	uint32_t num_nodes;
	struct ta_xgmi_extend_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
};
/**********************************************************/
/* Common input structure for XGMI callbacks */
union ta_xgmi_cmd_input {
@ -126,16 +153,23 @@ union ta_xgmi_cmd_output {
	struct ta_xgmi_cmd_get_node_id_output get_node_id;
	struct ta_xgmi_cmd_get_hive_id_output get_hive_id;
	struct ta_xgmi_cmd_get_topology_info_output get_topology_info;
	struct ta_xgmi_cmd_get_peer_link_info_output get_link_info;
	struct ta_xgmi_cmd_get_peer_link_info get_link_info;
	struct ta_xgmi_cmd_get_extend_peer_link_info get_extend_link_info;
};
/**********************************************************/

struct ta_xgmi_shared_memory {
	uint32_t cmd_id;
	uint32_t resp_id;
	enum ta_xgmi_status xgmi_status;

	/* if the number of xgmi link record is more than 128, driver will set the
	 * flag 0 to get the first 128 of the link records and will set to 1, to get
	 * the second set
	 */
	uint8_t flag_extend_link_record;
	uint8_t reserved0[3];
	/* bit0: port_num info support flag for GET_EXTEND_PEER_LINKS command */
	uint8_t caps_flag;
	uint8_t reserved[2];
	union ta_xgmi_cmd_input xgmi_in_message;
	union ta_xgmi_cmd_output xgmi_out_message;
};

|
||||
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
|
||||
|
||||
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
|
||||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
|
||||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0)))
|
||||
*error_count += 1;
|
||||
}
|
||||
|
||||
@ -125,7 +127,6 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev
|
||||
|
||||
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
|
||||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
|
||||
@ -293,7 +294,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
|
||||
/* calculate error address if ue error is detected */
|
||||
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1) {
|
||||
|
||||
mc_umc_addrt0 =
|
||||
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
|
||||
|
@ -176,9 +176,6 @@ static int vcn_v4_0_sw_init(void *handle)
			AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
		}

		if (amdgpu_sriov_vf(adev))
			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}
@ -1209,6 +1206,24 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
	return 0;
}

static int vcn_v4_0_init_ring_metadata(struct amdgpu_device *adev, uint32_t vcn_inst, struct amdgpu_ring *ring_enc)
{
	struct amdgpu_vcn_rb_metadata *rb_metadata = NULL;
	uint8_t *rb_ptr = (uint8_t *)ring_enc->ring;

	rb_ptr += ring_enc->ring_size;
	rb_metadata = (struct amdgpu_vcn_rb_metadata *)rb_ptr;

	memset(rb_metadata, 0, sizeof(struct amdgpu_vcn_rb_metadata));
	rb_metadata->size = sizeof(struct amdgpu_vcn_rb_metadata);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
	rb_metadata->version = 1;
	rb_metadata->ring_id = vcn_inst & 0xFF;

	return 0;
}

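vcn_v4_0_init_ring_metadata() parks the metadata block in the extra dwords reserved past the ring body (matched by the .extra_dw entry added further down). A stand-alone sketch of that trailing-footer layout, with an invented struct and sizes:

#include <stdint.h>
#include <stdlib.h>

struct rb_metadata {                    /* stand-in for amdgpu_vcn_rb_metadata */
	uint32_t size;
	uint32_t present_flag_0;
	uint8_t  version;
	uint8_t  ring_id;
};

int main(void)
{
	size_t ring_size = 4096;

	/* one allocation: ring body followed by the metadata footer */
	uint8_t *ring = calloc(1, ring_size + sizeof(struct rb_metadata));
	struct rb_metadata *md = (struct rb_metadata *)(ring + ring_size);

	md->size = sizeof(*md);         /* lets firmware find/validate the footer */
	md->version = 1;
	md->ring_id = 0;

	free(ring);
	return 0;
}
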
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
{
	int i;
@ -1331,11 +1346,30 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
			rb_enc_addr = ring_enc->gpu_addr;

			rb_setup->is_rb_enabled_flags |= RB_ENABLED;
			rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
			rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
			rb_setup->rb_size = ring_enc->ring_size / 4;
			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

			if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
				vcn_v4_0_init_ring_metadata(adev, i, ring_enc);

				memset((void *)&rb_setup->rb_info, 0, sizeof(struct amdgpu_vcn_rb_setup_info) * MAX_NUM_VCN_RB_SETUP);
				if (!(adev->vcn.harvest_config & (1 << 0))) {
					rb_setup->rb_info[0].rb_addr_lo = lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
					rb_setup->rb_info[0].rb_addr_hi = upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
					rb_setup->rb_info[0].rb_size = adev->vcn.inst[0].ring_enc[0].ring_size / 4;
				}
				if (!(adev->vcn.harvest_config & (1 << 1))) {
					rb_setup->rb_info[2].rb_addr_lo = lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
					rb_setup->rb_info[2].rb_addr_hi = upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
					rb_setup->rb_info[2].rb_size = adev->vcn.inst[1].ring_enc[0].ring_size / 4;
				}
				fw_shared->decouple.is_enabled = 1;
				fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
			} else {
				rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
				rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
				rb_setup->rb_size = ring_enc->ring_size / 4;
			}

			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
@ -1807,6 +1841,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@ -2020,16 +2055,20 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
		ip_instance = entry->ring_id;
	} else {
		switch (entry->client_id) {
		case SOC15_IH_CLIENTID_VCN:
			ip_instance = 0;
			break;
		case SOC15_IH_CLIENTID_VCN1:
			ip_instance = 1;
			break;
		default:
			DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
			return 0;
		}
	}

	DRM_DEBUG("IH: VCN TRAP\n");

@ -1760,6 +1760,11 @@ static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
			  SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
			  tmp, 0, indirect);

	tmp = UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_VCPU_INT_EN2),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),

@ -302,7 +302,7 @@ static void svm_range_free(struct svm_range *prange, bool do_unmap)
	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		if (prange->dma_addr[gpuidx]) {
			kvfree(prange->dma_addr[gpuidx]);
			prange->dma_addr[gpuidx] = NULL;
		}
	}

@ -1106,26 +1106,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
}

static int
svm_range_split_tail(struct svm_range *prange,
		     uint64_t new_last, struct list_head *insert_list)
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	struct svm_range *tail;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r)
	if (!r) {
		list_add(&tail->list, insert_list);
		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
			list_add(&tail->update_list, remap_list);
	}
	return r;
}

static int
svm_range_split_head(struct svm_range *prange,
		     uint64_t new_start, struct list_head *insert_list)
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	struct svm_range *head;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r)
	if (!r) {
		list_add(&head->list, insert_list);
		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
			list_add(&head->update_list, remap_list);
	}
	return r;
}

@ -1141,66 +1147,6 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
	list_add_tail(&pchild->child_list, &prange->child_list);
}

/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
			       unsigned long addr, struct svm_range *parent,
			       struct svm_range *prange)
{
	struct svm_range *head, *tail;
	unsigned long start, last, size;
	int r;

	/* Align splited range start and size to granularity size, then a single
	 * PTE will be used for whole range, this reduces the number of PTE
	 * updated and the L1 TLB space used for translation.
	 */
	size = 1UL << prange->granularity;
	start = ALIGN_DOWN(addr, size);
	last = ALIGN(addr + 1, size) - 1;

	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
		 prange->svms, prange->start, prange->last, start, last, size);

	if (start > prange->start) {
		r = svm_range_split(prange, start, prange->last, &head);
		if (r)
			return r;
		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
	}

	if (last < prange->last) {
		r = svm_range_split(prange, prange->start, last, &tail);
		if (r)
			return r;
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	}

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
			 prange, prange->start, prange->last,
			 SVM_OP_ADD_RANGE_AND_MAP);
	}
	return 0;
}
static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
@ -2112,7 +2058,7 @@ static int
svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
	      struct list_head *update_list, struct list_head *insert_list,
	      struct list_head *remove_list)
	      struct list_head *remove_list, struct list_head *remap_list)
{
	unsigned long last = start + size - 1UL;
	struct svm_range_list *svms = &p->svms;
@ -2128,6 +2074,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
	INIT_LIST_HEAD(insert_list);
	INIT_LIST_HEAD(remove_list);
	INIT_LIST_HEAD(&new_list);
	INIT_LIST_HEAD(remap_list);

	node = interval_tree_iter_first(&svms->objects, start, last);
	while (node) {
@ -2164,14 +2111,14 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
			if (node->start < start) {
				pr_debug("change old range start\n");
				r = svm_range_split_head(prange, start,
							 insert_list);
							 insert_list, remap_list);
				if (r)
					goto out;
			}
			if (node->last > last) {
				pr_debug("change old range last\n");
				r = svm_range_split_tail(prange, last,
							 insert_list);
							 insert_list, remap_list);
				if (r)
					goto out;
			}
@ -3561,6 +3508,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
	struct list_head update_list;
	struct list_head insert_list;
	struct list_head remove_list;
	struct list_head remap_list;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct svm_range *next;
@ -3592,7 +3540,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,

	/* Add new range and split existing ranges as needed */
	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
			  &insert_list, &remove_list);
			  &insert_list, &remove_list, &remap_list);
	if (r) {
		mutex_unlock(&svms->lock);
		mmap_write_unlock(mm);
@ -3657,6 +3605,19 @@ out_unlock_range:
		ret = r;
	}

	list_for_each_entry(prange, &remap_list, update_list) {
		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
			 prange, prange->start, prange->last);
		mutex_lock(&prange->migrate_mutex);
		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
					       true, true, prange->mapped_to_gpu);
		if (r)
			pr_debug("failed %d on remap svm range\n", r);
		mutex_unlock(&prange->migrate_mutex);
		if (r)
			ret = r;
	}

	dynamic_svm_range_dump(svms);

	mutex_unlock(&svms->lock);

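The new remap_list defers revalidation: when a head/tail split does not land on a granularity boundary, the affected range is queued and remapped after the update loop. A toy rendering of the alignment test that gates the queueing — IS_ALIGNED reimplemented, values invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two alignment check, as the kernel's IS_ALIGNED() does. */
static bool is_aligned(uint64_t x, uint64_t a)
{
	return (x & (a - 1)) == 0;
}

int main(void)
{
	unsigned int granularity = 9;           /* 512-page granules, a typical default */
	uint64_t granule = 1ULL << granularity;
	uint64_t new_last = 1000;               /* split tail ends at page 1000 */

	/* Mirrors svm_range_split_tail(): queue a remap only for unaligned cuts. */
	if (!is_aligned(new_last + 1, granule))
		printf("tail [.. 0x%llx] needs remap\n",
		       (unsigned long long)new_last);
	return 0;
}
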
@ -172,9 +172,6 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
			    bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
				   unsigned long addr, struct svm_range *parent,
				   struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
			    uint32_t vmid, uint32_t node_id, uint64_t addr,
			    bool write_fault);

@ -2900,7 +2900,7 @@ static int dm_resume(void *handle)
	}

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

|
||||
if (update_type >= update_surface_trace_level)
|
||||
update_surface_trace(dc, srf_updates, surface_count);
|
||||
|
||||
for (i = 0; i < surface_count; i++)
|
||||
copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
|
||||
|
||||
if (update_type >= UPDATE_TYPE_FULL) {
|
||||
struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
|
||||
|
||||
@ -3137,8 +3140,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
|
||||
for (i = 0; i < surface_count; i++) {
|
||||
struct dc_plane_state *surface = srf_updates[i].surface;
|
||||
|
||||
copy_surface_update_to_plane(surface, &srf_updates[i]);
|
||||
|
||||
if (update_type >= UPDATE_TYPE_MED) {
|
||||
for (j = 0; j < dc->res_pool->pipe_count; j++) {
|
||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
|
||||
|
@ -187,7 +187,6 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int

/* CLK SRC */
#define CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) \
  ( \
  SRI_ARR_ALPHABET(PIXCLK_RESYNC_CNTL, PHYPLL, index, pllid), \
  SRII_ARR_2(PHASE, DP_DTO, 0, index), \
  SRII_ARR_2(PHASE, DP_DTO, 1, index), \
@ -200,12 +199,10 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 0, index), \
  SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 1, index), \
  SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 2, index), \
  SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 3, index) \
  )
  SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 3, index)

/* ABM */
#define ABM_DCN32_REG_LIST_RI(id) \
  ( \
  SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
  SRI_ARR(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
  SRI_ARR(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
@ -217,12 +214,10 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
  SRI_ARR(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
  SRI_ARR(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
  SRI_ARR(DC_ABM1_ACE_THRES_12, ABM, id), NBIO_SR_ARR(BIOS_SCRATCH_2, id) \
  )
  SRI_ARR(DC_ABM1_ACE_THRES_12, ABM, id), NBIO_SR_ARR(BIOS_SCRATCH_2, id)

/* Audio */
#define AUD_COMMON_REG_LIST_RI(id) \
  ( \
  SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id), \
  SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id), \
  SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS, id), \
@ -231,41 +226,33 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SR_ARR(DCCG_AUDIO_DTO_SOURCE, id), SR_ARR(DCCG_AUDIO_DTO0_MODULE, id), \
  SR_ARR(DCCG_AUDIO_DTO0_PHASE, id), SR_ARR(DCCG_AUDIO_DTO1_MODULE, id), \
  SR_ARR(DCCG_AUDIO_DTO1_PHASE, id) \
  )

/* VPG */

#define VPG_DCN3_REG_LIST_RI(id) \
  ( \
  SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \
  SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
  SRI_ARR(VPG_GENERIC_PACKET_DATA, VPG, id), \
  SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \
  SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id) \
  )
  SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id)

/* AFMT */
#define AFMT_DCN3_REG_LIST_RI(id) \
  ( \
  SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
  SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
  SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
  SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
  SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
  SRI_ARR(AFMT_60958_0, AFMT, id), SRI_ARR(AFMT_60958_1, AFMT, id), \
  SRI_ARR(AFMT_60958_2, AFMT, id), SRI_ARR(AFMT_MEM_PWR, AFMT, id) \
  )
  SRI_ARR(AFMT_60958_2, AFMT, id), SRI_ARR(AFMT_MEM_PWR, AFMT, id)

/* APG */
#define APG_DCN31_REG_LIST_RI(id) \
  (\
  SRI_ARR(APG_CONTROL, APG, id), SRI_ARR(APG_CONTROL2, APG, id), \
  SRI_ARR(APG_MEM_PWR, APG, id), SRI_ARR(APG_DBG_GEN_CONTROL, APG, id) \
  )
  SRI_ARR(APG_MEM_PWR, APG, id), SRI_ARR(APG_DBG_GEN_CONTROL, APG, id)

/* Stream encoder */
#define SE_DCN32_REG_LIST_RI(id) \
  ( \
  SRI_ARR(AFMT_CNTL, DIG, id), SRI_ARR(DIG_FE_CNTL, DIG, id), \
  SRI_ARR(HDMI_CONTROL, DIG, id), SRI_ARR(HDMI_DB_CONTROL, DIG, id), \
  SRI_ARR(HDMI_GC, DIG, id), \
@ -309,28 +296,22 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
  SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
  SRI_ARR(DIG_FE_CNTL, DIG, id), SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \
  SRI_ARR(DIG_FIFO_CTRL0, DIG, id) \
  )
  SRI_ARR(DIG_FIFO_CTRL0, DIG, id)

/* Aux regs */

#define AUX_REG_LIST_RI(id) \
  ( \
  SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
  SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id) \
  )
  SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id)

#define DCN2_AUX_REG_LIST_RI(id) \
  ( \
  AUX_REG_LIST_RI(id), SRI_ARR(AUX_DPHY_TX_CONTROL, DP_AUX, id) \
  )
  AUX_REG_LIST_RI(id), SRI_ARR(AUX_DPHY_TX_CONTROL, DP_AUX, id)

/* HDP */
#define HPD_REG_LIST_RI(id) SRI_ARR(DC_HPD_CONTROL, HPD, id)

/* Link encoder */
#define LE_DCN3_REG_LIST_RI(id) \
  ( \
  SRI_ARR(DIG_BE_CNTL, DIG, id), SRI_ARR(DIG_BE_EN_CNTL, DIG, id), \
  SRI_ARR(TMDS_CTL_BITS, DIG, id), \
  SRI_ARR(TMDS_DCBALANCER_CONTROL, DIG, id), SRI_ARR(DP_CONFIG, DP, id), \
@ -344,26 +325,20 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DP_SEC_CNTL, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
  SRI_ARR(DP_DPHY_FAST_TRAINING, DP, id), SRI_ARR(DP_SEC_CNTL1, DP, id), \
  SRI_ARR(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
  SRI_ARR(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) \
  )
  SRI_ARR(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)

#define LE_DCN31_REG_LIST_RI(id) \
  ( \
  LE_DCN3_REG_LIST_RI(id), SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
  SR_ARR(DIO_LINKA_CNTL, id), SR_ARR(DIO_LINKB_CNTL, id), \
  SR_ARR(DIO_LINKC_CNTL, id), SR_ARR(DIO_LINKD_CNTL, id), \
  SR_ARR(DIO_LINKE_CNTL, id), SR_ARR(DIO_LINKF_CNTL, id) \
  )
  SR_ARR(DIO_LINKE_CNTL, id), SR_ARR(DIO_LINKF_CNTL, id)

#define UNIPHY_DCN2_REG_LIST_RI(id, phyid) \
  ( \
  SRI_ARR_ALPHABET(CLOCK_ENABLE, SYMCLK, id, phyid), \
  SRI_ARR_ALPHABET(CHANNEL_XBAR_CNTL, UNIPHY, id, phyid) \
  )
  SRI_ARR_ALPHABET(CHANNEL_XBAR_CNTL, UNIPHY, id, phyid)

/* HPO DP stream encoder */
#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) \
  ( \
  SR_ARR(DP_STREAM_MAPPER_CONTROL0, id), \
  SR_ARR(DP_STREAM_MAPPER_CONTROL1, id), \
  SR_ARR(DP_STREAM_MAPPER_CONTROL2, id), \
@ -398,12 +373,10 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id), \
  SRI_ARR(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id), \
  SRI_ARR(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \
  SRI_ARR(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) \
  )
  SRI_ARR(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id)

/* HPO DP link encoder regs */
#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) \
  ( \
  SRI_ARR(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \
  SRI_ARR(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \
  SRI_ARR(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \
@ -432,12 +405,10 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \
  SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \
  SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \
  SRI_ARR(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) \
  )
  SRI_ARR(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id)

/* DPP */
#define DPP_REG_LIST_DCN30_COMMON_RI(id) \
  ( \
  SRI_ARR(CM_DEALPHA, CM, id), SRI_ARR(CM_MEM_PWR_STATUS, CM, id), \
  SRI_ARR(CM_BIAS_CR_R, CM, id), SRI_ARR(CM_BIAS_Y_G_CB_B, CM, id), \
  SRI_ARR(PRE_DEGAM, CNVC_CFG, id), SRI_ARR(CM_GAMCOR_CONTROL, CM, id), \
@ -552,12 +523,10 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
  SRI_ARR(OBUF_MEM_PWR_CTRL, DSCL, id), \
  SRI_ARR(DSCL_MEM_PWR_STATUS, DSCL, id), \
  SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id) \
  )
  SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id)

/* OPP */
#define OPP_REG_LIST_DCN_RI(id) \
  ( \
  SRI_ARR(FMT_BIT_DEPTH_CONTROL, FMT, id), SRI_ARR(FMT_CONTROL, FMT, id), \
  SRI_ARR(FMT_DITHER_RAND_R_SEED, FMT, id), \
  SRI_ARR(FMT_DITHER_RAND_G_SEED, FMT, id), \
@ -569,37 +538,29 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
  SRI_ARR(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \
  SRI_ARR(OPP_PIPE_CONTROL, OPP_PIPE, id) \
  )

#define OPP_REG_LIST_DCN10_RI(id) OPP_REG_LIST_DCN_RI(id)

#define OPP_DPG_REG_LIST_RI(id) \
  ( \
  SRI_ARR(DPG_CONTROL, DPG, id), SRI_ARR(DPG_DIMENSIONS, DPG, id), \
  SRI_ARR(DPG_OFFSET_SEGMENT, DPG, id), SRI_ARR(DPG_COLOUR_B_CB, DPG, id), \
  SRI_ARR(DPG_COLOUR_G_Y, DPG, id), SRI_ARR(DPG_COLOUR_R_CR, DPG, id), \
  SRI_ARR(DPG_RAMP_CONTROL, DPG, id), SRI_ARR(DPG_STATUS, DPG, id) \
  )
  SRI_ARR(DPG_RAMP_CONTROL, DPG, id), SRI_ARR(DPG_STATUS, DPG, id)

#define OPP_REG_LIST_DCN30_RI(id) \
  ( \
  OPP_REG_LIST_DCN10_RI(id), OPP_DPG_REG_LIST_RI(id), \
  SRI_ARR(FMT_422_CONTROL, FMT, id) \
  )
  SRI_ARR(FMT_422_CONTROL, FMT, id)

/* Aux engine regs */
#define AUX_COMMON_REG_LIST0_RI(id) \
  ( \
  SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_ARB_CONTROL, DP_AUX, id), \
  SRI_ARR(AUX_SW_DATA, DP_AUX, id), SRI_ARR(AUX_SW_CONTROL, DP_AUX, id), \
  SRI_ARR(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
  SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
  SRI_ARR(AUX_SW_STATUS, DP_AUX, id) \
  )
  SRI_ARR(AUX_SW_STATUS, DP_AUX, id)

/* DWBC */
#define DWBC_COMMON_REG_LIST_DCN30_RI(id) \
  ( \
  SR_ARR(DWB_ENABLE_CLK_CTRL, id), SR_ARR(DWB_MEM_PWR_CTRL, id), \
  SR_ARR(FC_MODE_CTRL, id), SR_ARR(FC_FLOW_CTRL, id), \
  SR_ARR(FC_WINDOW_START, id), SR_ARR(FC_WINDOW_SIZE, id), \
@ -693,13 +654,11 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SR_ARR(DWB_OGAM_RAMB_REGION_26_27, id), \
  SR_ARR(DWB_OGAM_RAMB_REGION_28_29, id), \
  SR_ARR(DWB_OGAM_RAMB_REGION_30_31, id), \
  SR_ARR(DWB_OGAM_RAMB_REGION_32_33, id) \
  )
  SR_ARR(DWB_OGAM_RAMB_REGION_32_33, id)

/* MCIF */

#define MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst) \
  ( \
  SRI2_ARR(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst), \
  SRI2_ARR(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst), \
  SRI2_ARR(MCIF_WB_BUF_PITCH, MCIF_WB, inst), \
@ -747,13 +706,11 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI2_ARR(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst), \
  SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst), \
  SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst), \
  SRI2_ARR(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst) \
  )
  SRI2_ARR(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst)

/* DSC */

#define DSC_REG_LIST_DCN20_RI(id) \
  ( \
  SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id), \
  SRI_ARR(DSC_DEBUG_CONTROL, DSC_TOP, id), \
  SRI_ARR(DSCC_CONFIG0, DSCC, id), SRI_ARR(DSCC_CONFIG1, DSCC, id), \
@ -801,8 +758,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
  SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \
  SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \
  SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) \
  )
  SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)

/* MPC */

@ -810,32 +766,25 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst)

#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst) \
  ( \
  SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) \
  )
  SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)

#define MPC_OUT_MUX_REG_LIST_DCN3_0_RI(inst) \
  ( \
  MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(CSC_MODE, MPC_OUT, inst), \
  SRII(CSC_C11_C12_A, MPC_OUT, inst), SRII(CSC_C33_C34_A, MPC_OUT, inst), \
  SRII(CSC_C11_C12_B, MPC_OUT, inst), SRII(CSC_C33_C34_B, MPC_OUT, inst), \
  SRII(DENORM_CONTROL, MPC_OUT, inst), \
  SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst), \
  SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), SR(MPC_OUT_CSC_COEF_FORMAT) \
  )
  SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), SR(MPC_OUT_CSC_COEF_FORMAT)

#define MPC_COMMON_REG_LIST_DCN1_0_RI(inst) \
  ( \
  SRII(MPCC_TOP_SEL, MPCC, inst), SRII(MPCC_BOT_SEL, MPCC, inst), \
  SRII(MPCC_CONTROL, MPCC, inst), SRII(MPCC_STATUS, MPCC, inst), \
  SRII(MPCC_OPP_ID, MPCC, inst), SRII(MPCC_BG_G_Y, MPCC, inst), \
  SRII(MPCC_BG_R_CR, MPCC, inst), SRII(MPCC_BG_B_CB, MPCC, inst), \
  SRII(MPCC_SM_CONTROL, MPCC, inst), \
  SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) \
  )
  SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)

#define MPC_REG_LIST_DCN3_0_RI(inst) \
  ( \
  MPC_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(MPCC_TOP_GAIN, MPCC, inst), \
  SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst), \
  SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst), \
@ -889,8 +838,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst), \
  SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst), \
  SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst), \
  SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) \
  )
  SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst)

#define MPC_REG_LIST_DCN3_2_RI(inst) \
  MPC_REG_LIST_DCN3_0_RI(inst),\
@ -1034,11 +982,9 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRII(MPCC_MCM_1DLUT_RAMB_REGION_30_31, MPCC_MCM, inst),\
  SRII(MPCC_MCM_1DLUT_RAMB_REGION_32_33, MPCC_MCM, inst),\
  SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst)

/* OPTC */

#define OPTC_COMMON_REG_LIST_DCN3_2_RI(inst) \
  ( \
  SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst), \
  SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst), \
  SRI_ARR(OTG_VREADY_PARAM, OTG, inst), \
@ -1100,22 +1046,17 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \
  SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
  SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
  SRI_ARR(OTG_DRR_CONTROL, OTG, inst) \
  )
  SRI_ARR(OTG_DRR_CONTROL, OTG, inst)

/* HUBP */

#define HUBP_REG_LIST_DCN_VM_RI(id) \
  ( \
  SRI_ARR(NOM_PARAMETERS_0, HUBPREQ, id), \
  SRI_ARR(NOM_PARAMETERS_1, HUBPREQ, id), \
  SRI_ARR(NOM_PARAMETERS_2, HUBPREQ, id), \
  SRI_ARR(NOM_PARAMETERS_3, HUBPREQ, id), \
  SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) \
  )

  SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
#define HUBP_REG_LIST_DCN_RI(id) \
  ( \
  SRI_ARR(DCHUBP_CNTL, HUBP, id), SRI_ARR(HUBPREQ_DEBUG_DB, HUBP, id), \
  SRI_ARR(HUBPREQ_DEBUG, HUBP, id), SRI_ARR(DCSURF_ADDR_CONFIG, HUBP, id), \
  SRI_ARR(DCSURF_TILING_CONFIG, HUBP, id), \
@ -1186,11 +1127,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
  SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
  SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
  SRI_ARR(HUBP_CLK_CNTL, HUBP, id) \
  )

  SRI_ARR(HUBP_CLK_CNTL, HUBP, id)
#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
  ( \
  HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
  SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
  SRI_ARR(PREFETCH_SETTINGS_C, HUBPREQ, id), \
@ -1217,35 +1155,24 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SRI_ARR(DCN_CUR1_TTU_CNTL0, HUBPREQ, id), \
  SRI_ARR(DCN_CUR1_TTU_CNTL1, HUBPREQ, id), \
  SRI_ARR(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
  SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id) \
  )

  SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id)
#define HUBP_REG_LIST_DCN21_RI(id) \
  ( \
  HUBP_REG_LIST_DCN2_COMMON_RI(id), SRI_ARR(FLIP_PARAMETERS_3, HUBPREQ, id), \
  SRI_ARR(FLIP_PARAMETERS_4, HUBPREQ, id), \
  SRI_ARR(FLIP_PARAMETERS_5, HUBPREQ, id), \
  SRI_ARR(FLIP_PARAMETERS_6, HUBPREQ, id), \
  SRI_ARR(VBLANK_PARAMETERS_5, HUBPREQ, id), \
  SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id) \
  )

  SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id)
#define HUBP_REG_LIST_DCN30_RI(id) \
  ( \
  HUBP_REG_LIST_DCN21_RI(id), SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id) \
  )

  HUBP_REG_LIST_DCN21_RI(id), SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id)
#define HUBP_REG_LIST_DCN32_RI(id) \
  ( \
  HUBP_REG_LIST_DCN30_RI(id), SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
  SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
  SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id) \
  )
  SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id)

/* HUBBUB */

#define HUBBUB_REG_LIST_DCN32_RI(id) \
  ( \
  SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A), \
  SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B), \
  SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C), \
@ -1286,13 +1213,11 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  SR(DCHUBBUB_ARB_MALL_CNTL), \
  SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
  SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \
  SR(SDPIF_REQUEST_RATE_LIMIT) \
  )
  SR(SDPIF_REQUEST_RATE_LIMIT)

/* DCCG */

#define DCCG_REG_LIST_DCN32_RI() \
  ( \
  SR(DPPCLK_DTO_CTRL), DCCG_SRII(DTO_PARAM, DPPCLK, 0), \
  DCCG_SRII(DTO_PARAM, DPPCLK, 1), DCCG_SRII(DTO_PARAM, DPPCLK, 2), \
  DCCG_SRII(DTO_PARAM, DPPCLK, 3), DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0), \
@ -1308,38 +1233,31 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
  DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \
  SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \
  SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), \
  SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL) \
  )
  SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL)

/* VMID */
#define DCN20_VMID_REG_LIST_RI(id) \
  ( \
  SRI_ARR(CNTL, DCN_VM_CONTEXT, id), \
  SRI_ARR(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id), \
  SRI_ARR(PAGE_TABLE_BASE_ADDR_LO32, DCN_VM_CONTEXT, id), \
  SRI_ARR(PAGE_TABLE_START_ADDR_HI32, DCN_VM_CONTEXT, id), \
  SRI_ARR(PAGE_TABLE_START_ADDR_LO32, DCN_VM_CONTEXT, id), \
  SRI_ARR(PAGE_TABLE_END_ADDR_HI32, DCN_VM_CONTEXT, id), \
  SRI_ARR(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id) \
  )
  SRI_ARR(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id)

/* I2C HW */

#define I2C_HW_ENGINE_COMMON_REG_LIST_RI(id) \
  ( \
  SRI_ARR_I2C(SETUP, DC_I2C_DDC, id), SRI_ARR_I2C(SPEED, DC_I2C_DDC, id), \
  SRI_ARR_I2C(HW_STATUS, DC_I2C_DDC, id), \
  SR_ARR_I2C(DC_I2C_ARBITRATION, id), \
  SR_ARR_I2C(DC_I2C_CONTROL, id), SR_ARR_I2C(DC_I2C_SW_STATUS, id), \
  SR_ARR_I2C(DC_I2C_TRANSACTION0, id), SR_ARR_I2C(DC_I2C_TRANSACTION1, id),\
  SR_ARR_I2C(DC_I2C_TRANSACTION2, id), SR_ARR_I2C(DC_I2C_TRANSACTION3, id),\
  SR_ARR_I2C(DC_I2C_DATA, id), SR_ARR_I2C(MICROSECOND_TIME_BASE_DIV, id) \
  )
  SR_ARR_I2C(DC_I2C_DATA, id), SR_ARR_I2C(MICROSECOND_TIME_BASE_DIV, id)

#define I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) \
  ( \
  I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR_I2C(DIO_MEM_PWR_CTRL, id), \
  SR_ARR_I2C(DIO_MEM_PWR_STATUS, id) \
  )
  SR_ARR_I2C(DIO_MEM_PWR_STATUS, id)

#endif /* _DCN32_RESOURCE_H_ */

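The pattern repeated through this header: each register-list macro loses its surrounding parentheses, so it expands to a plain comma-separated fragment rather than a parenthesized comma expression, letting it sit directly inside an initializer list. A toy illustration of the difference, with an invented REG() helper — a sketch of the idea, not the DC macros themselves:

#include <stdio.h>

#define REG(x) [x] = #x                 /* designated-initializer fragment */

/* Unparenthesized: the macro can be dropped straight into an initializer. */
#define REG_LIST_A() \
	REG(0), \
	REG(1)

int main(void)
{
	const char *regs[2] = { REG_LIST_A() };

	printf("%s %s\n", regs[0], regs[1]);
	return 0;
}
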
@ -236,85 +236,85 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,

	dc_assert_fp_enabled();

	dcn3_5_ip.max_num_otg =
		dc->res_pool->res_cap->num_timing_generator;
	dcn3_5_ip.max_num_dpp = dc->res_pool->pipe_count;
	dcn3_5_soc.num_chans = bw_params->num_channels;
	dcn3_5_ip.max_num_otg =
		dc->res_pool->res_cap->num_timing_generator;
	dcn3_5_ip.max_num_dpp = dc->res_pool->pipe_count;
	dcn3_5_soc.num_chans = bw_params->num_channels;

	ASSERT(clk_table->num_entries);
	ASSERT(clk_table->num_entries);

	/* Prepass to find max clocks independent of voltage level. */
	for (i = 0; i < clk_table->num_entries; ++i) {
		if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
			max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
		if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
			max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
	/* Prepass to find max clocks independent of voltage level. */
	for (i = 0; i < clk_table->num_entries; ++i) {
		if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
			max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
		if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
			max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
	}

	for (i = 0; i < clk_table->num_entries; i++) {
		/* loop backwards*/
		for (closest_clk_lvl = 0, j = dcn3_5_soc.num_states - 1;
		     j >= 0; j--) {
			if (dcn3_5_soc.clock_limits[j].dcfclk_mhz <=
			    clk_table->entries[i].dcfclk_mhz) {
				closest_clk_lvl = j;
				break;
			}
		}
		if (clk_table->num_entries == 1) {
			/*smu gives one DPM level, let's take the highest one*/
			closest_clk_lvl = dcn3_5_soc.num_states - 1;
		}

	for (i = 0; i < clk_table->num_entries; i++) {
		/* loop backwards*/
		for (closest_clk_lvl = 0, j = dcn3_5_soc.num_states - 1;
		     j >= 0; j--) {
			if (dcn3_5_soc.clock_limits[j].dcfclk_mhz <=
			    clk_table->entries[i].dcfclk_mhz) {
				closest_clk_lvl = j;
				break;
			}
		}
		if (clk_table->num_entries == 1) {
			/*smu gives one DPM level, let's take the highest one*/
			closest_clk_lvl = dcn3_5_soc.num_states - 1;
		}
		clock_limits[i].state = i;

		clock_limits[i].state = i;

		/* Clocks dependent on voltage level. */
		clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
		if (clk_table->num_entries == 1 &&
		    clock_limits[i].dcfclk_mhz <
		    dcn3_5_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
			/*SMU fix not released yet*/
			clock_limits[i].dcfclk_mhz =
				dcn3_5_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
		}

		clock_limits[i].fabricclk_mhz =
			clk_table->entries[i].fclk_mhz;
		clock_limits[i].socclk_mhz =
			clk_table->entries[i].socclk_mhz;

		if (clk_table->entries[i].memclk_mhz &&
		    clk_table->entries[i].wck_ratio)
			clock_limits[i].dram_speed_mts =
				clk_table->entries[i].memclk_mhz * 2 *
				clk_table->entries[i].wck_ratio;

		/* Clocks independent of voltage level. */
		clock_limits[i].dispclk_mhz = max_dispclk_mhz ?
			max_dispclk_mhz :
			dcn3_5_soc.clock_limits[closest_clk_lvl].dispclk_mhz;

		clock_limits[i].dppclk_mhz = max_dppclk_mhz ?
			max_dppclk_mhz :
			dcn3_5_soc.clock_limits[closest_clk_lvl].dppclk_mhz;

		clock_limits[i].dram_bw_per_chan_gbps =
			dcn3_5_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
		clock_limits[i].dscclk_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
		clock_limits[i].dtbclk_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
		clock_limits[i].phyclk_d18_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
		clock_limits[i].phyclk_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
		/* Clocks dependent on voltage level. */
		clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
		if (clk_table->num_entries == 1 &&
		    clock_limits[i].dcfclk_mhz <
		    dcn3_5_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
			/*SMU fix not released yet*/
			clock_limits[i].dcfclk_mhz =
				dcn3_5_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
		}

	memcpy(dcn3_5_soc.clock_limits, clock_limits,
	       sizeof(dcn3_5_soc.clock_limits));
		clock_limits[i].fabricclk_mhz =
			clk_table->entries[i].fclk_mhz;
		clock_limits[i].socclk_mhz =
			clk_table->entries[i].socclk_mhz;

	if (clk_table->num_entries)
		dcn3_5_soc.num_states = clk_table->num_entries;
		if (clk_table->entries[i].memclk_mhz &&
		    clk_table->entries[i].wck_ratio)
			clock_limits[i].dram_speed_mts =
				clk_table->entries[i].memclk_mhz * 2 *
				clk_table->entries[i].wck_ratio;

		/* Clocks independent of voltage level. */
		clock_limits[i].dispclk_mhz = max_dispclk_mhz ?
			max_dispclk_mhz :
			dcn3_5_soc.clock_limits[closest_clk_lvl].dispclk_mhz;

		clock_limits[i].dppclk_mhz = max_dppclk_mhz ?
			max_dppclk_mhz :
			dcn3_5_soc.clock_limits[closest_clk_lvl].dppclk_mhz;

		clock_limits[i].dram_bw_per_chan_gbps =
			dcn3_5_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
		clock_limits[i].dscclk_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
		clock_limits[i].dtbclk_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
		clock_limits[i].phyclk_d18_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
		clock_limits[i].phyclk_mhz =
			dcn3_5_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
	}

	memcpy(dcn3_5_soc.clock_limits, clock_limits,
	       sizeof(dcn3_5_soc.clock_limits));

	if (clk_table->num_entries)
		dcn3_5_soc.num_states = clk_table->num_entries;

	if (max_dispclk_mhz) {
		dcn3_5_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;

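The dcn35 hunk is essentially a re-indent of the bounding-box update; the core logic it carries is the backwards search for the closest predefined clock level at or below each DPM entry. The same search, reduced to a stand-alone program with toy values:

#include <stdio.h>

int main(void)
{
	/* Predefined per-state dcfclk limits, ascending (toy values). */
	int limits[] = { 300, 600, 900, 1200 };
	int num_states = 4;
	int entry_dcfclk = 750;          /* one DPM entry from the SMU table */
	int closest = 0;

	/* Walk backwards: the first state whose limit fits under the entry wins. */
	for (int j = num_states - 1; j >= 0; j--) {
		if (limits[j] <= entry_dcfclk) {
			closest = j;
			break;
		}
	}
	printf("closest level: %d (%d MHz)\n", closest, limits[closest]);
	return 0;
}
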
@ -61,7 +61,7 @@ ifneq ($(CONFIG_FRAME_WARN),0)
frame_warn_flag := -Wframe-larger-than=2048
endif

CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) -Wframe-larger-than=2048
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)

File diff suppressed because it is too large
@ -461,7 +461,7 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
			swapped = false;
		}

	};
	}
}
}

@ -65,8 +65,6 @@

#include "dcn10/dcn10_hwseq.h"

#include "dce110_hwseq.h"

#define GAMMA_HW_POINTS_NUM 256

/*

|
||||
* @hw_init: sets up the hw state
|
||||
* @hw_fini: tears down the hw state
|
||||
* @late_fini: final cleanup
|
||||
* @prepare_suspend: handle IP specific changes to prepare for suspend
|
||||
* (such as allocating any required memory)
|
||||
* @suspend: handles IP specific hw/sw changes for suspend
|
||||
* @resume: handles IP specific hw/sw changes for resume
|
||||
* @is_idle: returns current IP block idle status
|
||||
|
@ -19227,6 +19227,9 @@
|
||||
#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR0_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR0_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR0_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR0_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19241,6 +19244,9 @@
|
||||
#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR0_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR0_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR0_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR0_DCC_BASE
|
||||
#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
@ -19301,6 +19307,9 @@
|
||||
#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR1_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR1_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR1_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR1_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19315,6 +19324,9 @@
|
||||
#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR1_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR1_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR1_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR1_DCC_BASE
|
||||
#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
@ -19375,6 +19387,9 @@
|
||||
#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR2_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR2_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR2_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR2_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19389,6 +19404,9 @@
|
||||
#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR2_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR2_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR2_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR2_DCC_BASE
|
||||
#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
@ -19449,6 +19467,9 @@
|
||||
#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR3_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR3_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR3_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR3_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19463,6 +19484,9 @@
|
||||
#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR3_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR3_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR3_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR3_DCC_BASE
|
||||
#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
@ -19523,6 +19547,9 @@
|
||||
#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR4_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR4_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR4_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR4_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19537,6 +19564,9 @@
|
||||
#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR4_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR4_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR4_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR4_DCC_BASE
|
||||
#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
@ -19597,6 +19627,9 @@
|
||||
#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR5_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR5_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR5_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR5_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19611,6 +19644,9 @@
|
||||
#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR5_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR5_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR5_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR5_DCC_BASE
|
||||
#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
@ -19671,6 +19707,9 @@
|
||||
#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR6_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR6_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR6_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR6_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19685,6 +19724,9 @@
|
||||
#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR6_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR6_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR6_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR6_DCC_BASE
|
||||
#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
@ -19745,6 +19787,9 @@
|
||||
#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
|
||||
#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
|
||||
#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
|
||||
#define CB_COLOR7_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS__SHIFT 0x19
|
||||
#define CB_COLOR7_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE__SHIFT 0x1a
|
||||
#define CB_COLOR7_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT 0x1b
|
||||
#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
|
||||
#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
|
||||
#define CB_COLOR7_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
|
||||
@ -19759,6 +19804,9 @@
|
||||
#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
|
||||
#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
|
||||
#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
|
||||
#define CB_COLOR7_FDCC_CONTROL__DISABLE_OVERRIDE_INCONSISTENT_KEYS_MASK 0x02000000L
|
||||
#define CB_COLOR7_FDCC_CONTROL__ENABLE_MAX_COMP_FRAG_OVERRIDE_MASK 0x04000000L
|
||||
#define CB_COLOR7_FDCC_CONTROL__MAX_COMP_FRAGS_MASK 0x38000000L
|
||||
//CB_COLOR7_DCC_BASE
|
||||
#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
|
||||
#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
|
||||
|
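Note: all of the FDCC fields above follow the usual SoC15 __SHIFT/_MASK pairing. A small illustrative helper (hypothetical, not part of the header) showing how one field is updated with these macros:

/* Replace the MAX_COMP_FRAGS field of a CB_COLOR0_FDCC_CONTROL value. */
static inline uint32_t set_max_comp_frags(uint32_t reg, uint32_t frags)
{
	reg &= ~CB_COLOR0_FDCC_CONTROL__MAX_COMP_FRAGS_MASK;
	reg |= (frags << CB_COLOR0_FDCC_CONTROL__MAX_COMP_FRAGS__SHIFT) &
	       CB_COLOR0_FDCC_CONTROL__MAX_COMP_FRAGS_MASK;
	return reg;
}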
@ -775,6 +775,12 @@
#define regPCIE_USB4_ERR_CNTL5_BASE_IDX 5
#define regPCIE_USB4_LC_CNTL1 0x420179
#define regPCIE_USB4_LC_CNTL1_BASE_IDX 5
#define regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL 0x420118
#define regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL_BASE_IDX 5
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2 0x42001c
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2_BASE_IDX 5
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1 0x420187
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1_BASE_IDX 5


// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
@ -24634,7 +24634,18 @@
//PCIE_USB4_LC_CNTL1
#define PCIE_USB4_LC_CNTL1__PCIE_USB_ROUTER_CLEAR_PATH_MODE__SHIFT 0x0
#define PCIE_USB4_LC_CNTL1__PCIE_USB_ROUTER_CLEAR_PATH_MODE_MASK 0x00000001L

//BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL
#define BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK 0x00000001L
#define BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK 0x00000002L
#define BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK 0x00000020L
#define BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK 0x00000040L
#define BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK 0x00000080L
#define BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK 0x00000100L
//BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2__SLV_MEM_LS_EN_MASK 0x00010000L
//BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L

// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
//BIF_CFG_DEV0_RC0_VENDOR_ID
@ -761,7 +761,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;

if (count > 127)
if (count > 127 || count == 0)
return -EINVAL;

if (*buf == 's')
@ -781,7 +781,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
else
return -EINVAL;

memcpy(buf_cpy, buf, count+1);
memcpy(buf_cpy, buf, count);
buf_cpy[count] = 0;

tmp_str = buf_cpy;

@ -798,6 +799,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
return -EINVAL;
parameter_size++;

if (!tmp_str)
break;

while (isspace(*tmp_str))
tmp_str++;
}
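Note: the hunk above fixes two input-handling bugs in amdgpu_set_pp_od_clk_voltage(): a zero-byte write is now rejected, and only count bytes are copied (the old count+1 read one byte past the caller's buffer) before terminating locally. The hardened pattern in isolation (illustrative sketch, not the driver code verbatim; parse_od_input is a hypothetical name):

static ssize_t parse_od_input(const char *buf, size_t count)
{
	char buf_cpy[128];

	if (count == 0 || count > sizeof(buf_cpy) - 1)
		return -EINVAL;
	memcpy(buf_cpy, buf, count);	/* never read past buf[count-1] */
	buf_cpy[count] = '\0';		/* NUL-terminate the local copy */
	/* ... tokenize buf_cpy ... */
	return count;
}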
@ -2921,14 +2925,6 @@ static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
return sysfs_emit(buf, "%zd\n", val);
}

static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%i\n", 0);
}


static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
struct device_attribute *attr,
char *buf,
@ -2965,6 +2961,12 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
return size;
}

static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
}

static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct device_attribute *attr,
@ -849,7 +849,8 @@ static int smu_late_init(void *handle)
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
&smu->default_power_limit,
&smu->max_power_limit);
&smu->max_power_limit,
&smu->min_power_limit);
if (ret) {
dev_err(adev->dev, "Failed to get asic power limits!\n");
return ret;
@ -2447,6 +2448,8 @@ int smu_get_power_limit(void *handle,
limit_level = SMU_PPT_LIMIT_MAX;
break;
case PP_PWR_LIMIT_MIN:
limit_level = SMU_PPT_LIMIT_MIN;
break;
default:
return -EOPNOTSUPP;
break;
@ -2466,8 +2469,7 @@ int smu_get_power_limit(void *handle,
case IP_VERSION(11, 0, 13):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
NULL,
NULL);
NULL, NULL, NULL);
break;
default:
break;
@ -2480,6 +2482,9 @@ int smu_get_power_limit(void *handle,
case SMU_PPT_LIMIT_MAX:
*limit = smu->max_power_limit;
break;
case SMU_PPT_LIMIT_MIN:
*limit = smu->min_power_limit;
break;
default:
break;
}
@ -2502,10 +2507,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (smu->ppt_funcs->set_power_limit)
return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);

if (limit > smu->max_power_limit) {
if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
"New power limit (%d) is out of range [%d,%d]\n",
limit, smu->min_power_limit, smu->max_power_limit);
return -EINVAL;
}
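Note: with min_power_limit now tracked, the generic fallback in smu_set_power_limit() rejects requests on both sides of the allowed window. The check, reduced to its essentials (a sketch under the same [min, max] semantics as the hunk above, not the kernel code itself):

/* A requested limit must fall inside the OD-derived window. */
static bool power_limit_in_range(uint32_t limit, uint32_t min, uint32_t max)
{
	return limit >= min && limit <= max;	/* otherwise -EINVAL */
}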
@ -500,6 +500,7 @@ struct smu_context {
uint32_t current_power_limit;
uint32_t default_power_limit;
uint32_t max_power_limit;
uint32_t min_power_limit;

/* soft pptable */
uint32_t ppt_offset_bytes;
@ -821,9 +822,10 @@ struct pptable_funcs {
* @get_power_limit: Get the device's power limits.
*/
int (*get_power_limit)(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit);
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit);

/**
* @get_ppt_limit: Get the device's ppt limits.
@ -1278,14 +1278,15 @@ static int arcturus_get_fan_parameters(struct smu_context *smu)
}

static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_11_0_powerplay_table *powerplay_table =
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t power_limit, od_percent;
uint32_t power_limit, od_percent_upper, od_percent_lower;

if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@ -1302,17 +1303,25 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;

if (smu->od_enabled) {
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
} else {
od_percent_upper = 0;
od_percent_lower = 100;
}

dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);

if (max_power_limit) {
if (smu->od_enabled) {
od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
*max_power_limit = power_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}

dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

power_limit *= (100 + od_percent);
power_limit /= 100;
}

*max_power_limit = power_limit;
if (min_power_limit) {
*min_power_limit = power_limit * (100 - od_percent_lower);
*min_power_limit /= 100;
}

return 0;
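Note: every *_get_power_limit() backend in this series derives the window with the same integer arithmetic: max = default * (100 + od_percent_upper) / 100 and min = default * (100 - od_percent_lower) / 100, with upper = 0 and lower = 100 when overdrive is disabled (so max == default and min == 0). Factored out for clarity (illustrative helper, not in the driver):

static void od_power_window(uint32_t default_limit,
			    uint32_t od_percent_upper,
			    uint32_t od_percent_lower,
			    uint32_t *max, uint32_t *min)
{
	*max = default_limit * (100 + od_percent_upper) / 100;
	*min = default_limit * (100 - od_percent_lower) / 100;
}

For example, a 300 W default with upper = 15 and lower = 100 yields a [0, 345] W window.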
@ -2330,15 +2330,16 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
}

static int navi10_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_11_0_powerplay_table *powerplay_table =
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t power_limit, od_percent;
uint32_t power_limit, od_percent_upper, od_percent_lower;

if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@ -2355,18 +2356,26 @@ static int navi10_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;

if (max_power_limit) {
if (smu->od_enabled &&
if (smu->od_enabled &&
navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
} else {
od_percent_upper = 0;
od_percent_lower = 100;
}

dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);

power_limit *= (100 + od_percent);
power_limit /= 100;
}
if (max_power_limit) {
*max_power_limit = power_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}

*max_power_limit = power_limit;
if (min_power_limit) {
*min_power_limit = power_limit * (100 - od_percent_lower);
*min_power_limit /= 100;
}

return 0;
@ -620,11 +620,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
uint32_t power_limit, od_percent;
uint32_t power_limit, od_percent_upper, od_percent_lower;
uint16_t *table_member;

GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@ -639,21 +640,26 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;

if (max_power_limit) {
if (smu->od_enabled) {
od_percent =
le32_to_cpu(powerplay_table->overdrive_table.max[
SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);

dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n",
od_percent, power_limit);

power_limit *= (100 + od_percent);
power_limit /= 100;
}
*max_power_limit = power_limit;
if (smu->od_enabled) {
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
} else {
od_percent_upper = 0;
od_percent_lower = 100;
}

dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);

if (max_power_limit) {
*max_power_limit = power_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}

if (min_power_limit) {
*min_power_limit = power_limit * (100 - od_percent_lower);
*min_power_limit /= 100;
}
return 0;
}

@ -672,7 +678,7 @@ static void sienna_cichlid_get_smartshift_power_percentage(struct smu_context *s
uint32_t cur_power_limit;

if (metrics_v4->ApuSTAPMSmartShiftLimit != 0) {
sienna_cichlid_get_power_limit(smu, &cur_power_limit, NULL, NULL);
sienna_cichlid_get_power_limit(smu, &cur_power_limit, NULL, NULL, NULL);
apu_power_limit = metrics_v4->ApuSTAPMLimit;
dgpu_power_limit = cur_power_limit;
powerRatio = (((apu_power_limit +
@ -2314,7 +2314,8 @@ static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
static int vangogh_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_11_5_power_context *power_context =
smu->smu_power.power_context;
@ -2336,6 +2337,8 @@ static int vangogh_get_power_limit(struct smu_context *smu,
*default_power_limit = ppt_limit / 1000;
if (max_power_limit)
*max_power_limit = 29;
if (min_power_limit)
*min_power_limit = 0;

ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
if (ret) {
@ -1139,9 +1139,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
}

static int aldebaran_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t power_limit = 0;
@ -1154,7 +1155,8 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
*default_power_limit = 0;
if (max_power_limit)
*max_power_limit = 0;

if (min_power_limit)
*min_power_limit = 0;
dev_warn(smu->adev->dev,
"PPT feature is not enabled, power values can't be fetched.");

@ -1189,6 +1191,9 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
*max_power_limit = pptable->PptLimit;
}

if (min_power_limit)
*min_power_limit = 0;

return 0;
}
@ -2341,16 +2341,17 @@ static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu)
}

static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_13_0_0_powerplay_table *powerplay_table =
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
uint32_t power_limit, od_percent;
uint32_t power_limit, od_percent_upper, od_percent_lower;

if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@ -2362,16 +2363,25 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;

if (smu->od_enabled) {
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
} else {
od_percent_upper = 0;
od_percent_lower = 100;
}

dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);

if (max_power_limit) {
if (smu->od_enabled) {
od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
*max_power_limit = power_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}

dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

power_limit *= (100 + od_percent);
power_limit /= 100;
}
*max_power_limit = power_limit;
if (min_power_limit) {
*min_power_limit = power_limit * (100 - od_percent_lower);
*min_power_limit /= 100;
}

return 0;
@ -2562,7 +2572,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
(amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) &&
((smu->adev->pm.fw_version == 0x004e6601) ||
(smu->adev->pm.fw_version >= 0x004e7300))) {
(smu->adev->pm.fw_version >= 0x004e7400))) {
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
@ -1309,9 +1309,10 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
}

static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
@ -1335,6 +1336,8 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
*max_power_limit = pptable->MaxSocketPowerLimit;
}

if (min_power_limit)
*min_power_limit = 0;
return 0;
}

@ -2035,8 +2038,10 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table

metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
if (ret)
if (ret) {
kfree(metrics);
return ret;
}

smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 4);

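Note: the gpu_metrics hunk above plugs one of the error-path leaks called out in the tag by freeing the metrics buffer before returning. An equivalent goto-based structure (illustrative alternative, not the driver's code; metrics_size and fetch_metrics() are stand-ins for the real size and for smu_v13_0_6_get_metrics_table()):

void *metrics;
ssize_t ret;

metrics = kzalloc(metrics_size, GFP_KERNEL);
if (!metrics)
	return -ENOMEM;
ret = fetch_metrics(metrics);
if (ret)
	goto out;
/* ... populate gpu_metrics from the fetched table ... */
out:
	kfree(metrics);	/* single cleanup point for success and failure */
	return ret;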
@ -2238,16 +2243,24 @@ failed:
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_hive_info *hive = NULL;
u32 hive_ras_recovery = 0;
struct amdgpu_ras *ras;
u32 fatal_err, param;
int ret = 0;

hive = amdgpu_get_xgmi_hive(adev);
ras = amdgpu_ras_get_context(adev);
fatal_err = 0;
param = SMU_RESET_MODE_1;

if (hive) {
hive_ras_recovery = atomic_read(&hive->ras_recovery);
amdgpu_put_xgmi_hive(hive);
}

/* fatal error triggered by ras, PMFW supports the flag */
if (ras && atomic_read(&ras->in_recovery))
if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
fatal_err = 1;

param |= (fatal_err << 16);
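Note: in the mode1 reset hunk above, the low bits of the message parameter select the reset mode, and bit 16 carries the fatal-error flag, now set when RAS recovery is pending on this device or anywhere in its XGMI hive. The packing in isolation (sketch of the same logic):

u32 param = SMU_RESET_MODE_1;	/* reset mode in the low bits */

if (fatal_err)
	param |= (1U << 16);	/* fatal RAS error flag for the PMFW */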
@ -2290,7 +2303,7 @@ static int smu_v13_0_6_post_init(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;

if (!amdgpu_sriov_vf(adev) && amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
if (!amdgpu_sriov_vf(adev) && adev->ras_enabled)
return smu_v13_0_6_mca_set_debug_mode(smu, true);

return 0;
@ -2304,16 +2304,17 @@ static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu)
}

static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_13_0_7_powerplay_table *powerplay_table =
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
uint32_t power_limit, od_percent;
uint32_t power_limit, od_percent_upper, od_percent_lower;

if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@ -2325,16 +2326,25 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;

if (smu->od_enabled) {
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
} else {
od_percent_upper = 0;
od_percent_lower = 100;
}

dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);

if (max_power_limit) {
if (smu->od_enabled) {
od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
*max_power_limit = power_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}

dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

power_limit *= (100 + od_percent);
power_limit /= 100;
}
*max_power_limit = power_limit;
if (min_power_limit) {
*min_power_limit = power_limit * (100 - od_percent_lower);
*min_power_limit /= 100;
}

return 0;
@ -85,7 +85,7 @@
#define smu_i2c_fini(smu) smu_ppt_funcs(i2c_fini, 0, smu)
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
#define smu_get_asic_power_limits(smu, current, default, max) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max)
#define smu_get_asic_power_limits(smu, current, default, max, min) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max, min)
#define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
#define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
#define smu_gfx_ulv_control(smu, enablement) smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
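Note: with the extra min argument, smu_get_asic_power_limits() callers look like smu_late_init() in the hunk further up; any limit pointer the caller does not need may be NULL, as the smu_get_power_limit() fallback and the backend implementations show:

ret = smu_get_asic_power_limits(smu,
				&smu->current_power_limit,
				&smu->default_power_limit,
				&smu->max_power_limit,
				&smu->min_power_limit);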