drm-fixes for 6.3-rc5
- i915 fixes for color mgmt, psr, lmem flush, hibernate oops, and more
- amdgpu: dp mst and hibernate regression fix
- etnaviv: revert fdinfo support (incl drm/sched revert), leak fix
- misc ivpu fixes, nouveau backlight, drm buddy allocator 32bit fixes

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEb4nG6jLu8Y5XI+PfTA9ye/CYqnEFAmQl5pYACgkQTA9ye/CY
qnHYXRAAqMRpJz+EmQAKZ2Y1dDNmRTVe+2Rd+DdVWD3WulTkBAA6uZzV/xFnlcwT
oglXaXyjMUzCJhMzZ5D9gUG1kusneqeKqkPrz6p2jBovYZzJuEGoQuv4Y1cnl80x
BSJMbwVUyU6Wdo7W2SR1OYcNwKLeO43wv3v9pMUlV0OzIGc1YTdKhTmCqAql6KtJ
X6j2pbekjMeldxBH7PiFKPWAfUb7tDChH5kUwPntLmuc6DvRp/7bgon3Hz5gcOUK
5WPT+vh3u6Nh7oSINYnec2HayTkOWwFF0IIISwdNs7qfafSh4mebOMnnv4OnleZE
uvAJv5ArGy0eV9PbA20pft1+ErMvGTEi4H+zDe7PaI1UbVpqkeNiQ3Rz6FnYu+Bs
U14PZ/Llhes/tqtmd6iAZ5pg8F03GdtZMgCHSNYIs0zXALUGwu1uPQIUU/lklVlB
C+ZA1BcoVaJ2sDJr+QdAtkXkpC97Pxtw0ny5CQTCOuiZgOm+7Bbuvyes4ld1fKAu
pRfjmxnptPq1/QgevEY7523MkzbOGsVsGtq8DkZASCOSUVLuu2XddppMigH2f9cW
WQjomKCdfVFhLMy/f74dCUp0gYSaoBAPm4DSJtivkyBtiTRyEwdHhFiXXoEpbZuJ
QR/GF+kVVmtX06WnPg8TgwF/1ZPXPs8Z0rqGx5gqSG53zhG2j3o=
=nO9/
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2023-03-30' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:
 "Two regression fixes in here, otherwise just the usual stuff:

  - i915 fixes for color mgmt, psr, lmem flush, hibernate oops, and more

  - amdgpu: dp mst and hibernate regression fix

  - etnaviv: revert fdinfo support (incl drm/sched revert), leak fix

  - misc ivpu fixes, nouveau backlight, drm buddy allocator 32bit fixes"

* tag 'drm-fixes-2023-03-30' of git://anongit.freedesktop.org/drm/drm: (27 commits)
  Revert "drm/scheduler: track GPU active time per entity"
  Revert "drm/etnaviv: export client GPU usage statistics via fdinfo"
  drm/etnaviv: fix reference leak when mmaping imported buffer
  drm/amdgpu: allow more APUs to do mode2 reset when go to S4
  drm/amd/display: Take FEC Overhead into Timeslot Calculation
  drm/amd/display: Add DSC Support for Synaptics Cascaded MST Hub
  drm: test: Fix 32-bit issue in drm_buddy_test
  drm: buddy_allocator: Fix buddy allocator init on 32-bit systems
  drm/nouveau/kms: Fix backlight registration
  drm/i915/perf: Drop wakeref on GuC RC error
  drm/i915/dpt: Treat the DPT BO as a framebuffer
  drm/i915/gem: Flush lmem contents after construction
  drm/i915/tc: Fix the ICL PHY ownership check in TC-cold state
  drm/i915: Disable DC states for all commits
  drm/i915: Workaround ICL CSC_MODE sticky arming
  drm/i915: Add a .color_post_update() hook
  drm/i915: Move CSC load back into .color_commit_arm() when PSR is enabled on skl/glk
  drm/i915: Split icl_color_commit_noarm() from skl_color_commit_noarm()
  drm/i915/pmu: Use functions common with sysfs to read actual freq
  accel/ivpu: Fix IPC buffer header status field value
  ...
commit 0d3ff8087b
@@ -8,7 +8,6 @@
 #include <linux/pci.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_ioctl.h>
@@ -118,6 +117,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
 	struct drm_ivpu_param *args = data;
 	int ret = 0;
+	int idx;
+
+	if (!drm_dev_enter(dev, &idx))
+		return -ENODEV;
 
 	switch (args->param) {
 	case DRM_IVPU_PARAM_DEVICE_ID:
@@ -171,6 +174,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 		break;
 	}
 
+	drm_dev_exit(idx);
 	return ret;
 }
 
@@ -470,8 +474,8 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 
 	vdev->hw->ops = &ivpu_hw_mtl_ops;
 	vdev->platform = IVPU_PLATFORM_INVALID;
-	vdev->context_xa_limit.min = IVPU_GLOBAL_CONTEXT_MMU_SSID + 1;
-	vdev->context_xa_limit.max = IVPU_CONTEXT_LIMIT;
+	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
+	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
 	atomic64_set(&vdev->unique_id_counter, 0);
 	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
 	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
@@ -565,6 +569,8 @@ err_mmu_gctx_fini:
 	ivpu_mmu_global_context_fini(vdev);
err_power_down:
 	ivpu_hw_power_down(vdev);
+	if (IVPU_WA(d3hot_after_power_off))
+		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
 	xa_destroy(&vdev->submitted_jobs_xa);
 	xa_destroy(&vdev->context_xa);
@@ -575,7 +581,11 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
 	ivpu_pm_disable(vdev);
 	ivpu_shutdown(vdev);
+	if (IVPU_WA(d3hot_after_power_off))
+		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
 	ivpu_job_done_thread_fini(vdev);
+	ivpu_pm_cancel_recovery(vdev);
 
 	ivpu_ipc_fini(vdev);
 	ivpu_fw_fini(vdev);
 	ivpu_mmu_global_context_fini(vdev);
@@ -622,7 +632,7 @@ static void ivpu_remove(struct pci_dev *pdev)
 {
 	struct ivpu_device *vdev = pci_get_drvdata(pdev);
 
-	drm_dev_unregister(&vdev->drm);
+	drm_dev_unplug(&vdev->drm);
 	ivpu_dev_fini(vdev);
 }
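For context on the SSID limits changed above: context_xa is an allocating XArray, and the {min, max} pair forms the struct xa_limit that bounds which indices xa_alloc() may hand out, so user contexts get SSIDs 2..65 while SSID 0 (the global context) and SSID 1 (the invalid context) stay reserved. A hypothetical allocation site would look roughly like this (sketch, not taken from the patch):

	u32 ssid;
	int ret;

	/* xa_alloc() picks the first free index inside context_xa_limit. */
	ret = xa_alloc(&vdev->context_xa, &ssid, file_priv,
		       vdev->context_xa_limit, GFP_KERNEL);
	if (ret)
		return ret; /* SSID range exhausted, or out of memory */
	/* ssid now lies in [IVPU_USER_CONTEXT_MIN_SSID, IVPU_USER_CONTEXT_MAX_SSID] */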
@@ -7,6 +7,7 @@
 #define __IVPU_DRV_H__
 
 #include <drm/drm_device.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_print.h>
@@ -24,7 +25,10 @@
 #define PCI_DEVICE_ID_MTL   0x7d1d
 
 #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
-#define IVPU_CONTEXT_LIMIT	     64
+/* SSID 1 is used by the VPU to represent invalid context */
+#define IVPU_USER_CONTEXT_MIN_SSID   2
+#define IVPU_USER_CONTEXT_MAX_SSID   (IVPU_USER_CONTEXT_MIN_SSID + 63)
 
 #define IVPU_NUM_ENGINES	     2
 
 #define IVPU_PLATFORM_SILICON	     0
@@ -70,6 +74,7 @@
 struct ivpu_wa_table {
 	bool punit_disabled;
 	bool clear_runtime_mem;
+	bool d3hot_after_power_off;
 };
 
 struct ivpu_hw_info;
@@ -12,24 +12,23 @@
 #include "ivpu_mmu.h"
 #include "ivpu_pm.h"
 
-#define TILE_FUSE_ENABLE_BOTH	     0x0
-#define TILE_FUSE_ENABLE_UPPER	     0x1
-#define TILE_FUSE_ENABLE_LOWER	     0x2
-
-#define TILE_SKU_BOTH_MTL	     0x3630
-#define TILE_SKU_LOWER_MTL	     0x3631
-#define TILE_SKU_UPPER_MTL	     0x3632
+#define TILE_FUSE_ENABLE_BOTH	     0x0
+#define TILE_SKU_BOTH_MTL	     0x3630
 
 /* Work point configuration values */
-#define WP_CONFIG_1_TILE_5_3_RATIO   0x0101
-#define WP_CONFIG_1_TILE_4_3_RATIO   0x0102
-#define WP_CONFIG_2_TILE_5_3_RATIO   0x0201
-#define WP_CONFIG_2_TILE_4_3_RATIO   0x0202
-#define WP_CONFIG_0_TILE_PLL_OFF     0x0000
+#define CONFIG_1_TILE		     0x01
+#define CONFIG_2_TILE		     0x02
+#define PLL_RATIO_5_3		     0x01
+#define PLL_RATIO_4_3		     0x02
+#define WP_CONFIG(tile, ratio)	     (((tile) << 8) | (ratio))
+#define WP_CONFIG_1_TILE_5_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_1_TILE_4_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_2_TILE_5_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_2_TILE_4_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_0_TILE_PLL_OFF     WP_CONFIG(0, 0)
 
 #define PLL_REF_CLK_FREQ	     (50 * 1000000)
 #define PLL_SIMULATION_FREQ	     (10 * 1000000)
-#define PLL_RATIO_TO_FREQ(x)	     ((x) * PLL_REF_CLK_FREQ)
 #define PLL_DEFAULT_EPP_VALUE	     0x80
 
 #define TIM_SAFE_ENABLE		     0xf1d0dead
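The WP_CONFIG() rework above replaces opaque hex literals with a composed value: tile-count select in the high byte, PLL-ratio select in the low byte. A quick way to convince yourself the new macros encode the same values as the deleted literals is a compile-time check like this (standalone C11 sketch, not part of the patch):

	#include <assert.h>

	#define CONFIG_1_TILE          0x01
	#define CONFIG_2_TILE          0x02
	#define PLL_RATIO_5_3          0x01
	#define PLL_RATIO_4_3          0x02
	#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))

	/* The composed values match the hex literals the patch deletes. */
	static_assert(WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3) == 0x0101, "");
	static_assert(WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3) == 0x0202, "");
	static_assert(WP_CONFIG(0, 0) == 0x0000, "");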
@@ -101,6 +100,7 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 {
 	vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
 	vdev->wa.clear_runtime_mem = false;
+	vdev->wa.d3hot_after_power_off = true;
 }
 
 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@@ -218,7 +218,8 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
 		config = 0;
 	}
 
-	ivpu_dbg(vdev, PM, "PLL workpoint request: %d Hz\n", PLL_RATIO_TO_FREQ(target_ratio));
+	ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
+		 config, target_ratio);
 
 	ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
 	if (ret) {
@@ -403,11 +404,6 @@ static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
 	return ivpu_boot_host_ss_axi_drive(vdev, true);
 }
 
-static int ivpu_boot_host_ss_axi_disable(struct ivpu_device *vdev)
-{
-	return ivpu_boot_host_ss_axi_drive(vdev, false);
-}
-
 static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
 {
 	int ret;
@@ -441,11 +437,6 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
 	return ivpu_boot_host_ss_top_noc_drive(vdev, true);
 }
 
-static int ivpu_boot_host_ss_top_noc_disable(struct ivpu_device *vdev)
-{
-	return ivpu_boot_host_ss_top_noc_drive(vdev, false);
-}
-
 static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
 {
 	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
@@ -504,16 +495,6 @@ static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
 	REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
 }
 
-static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
-{
-	ivpu_boot_dpu_active_drive(vdev, false);
-	ivpu_boot_pwr_island_isolation_drive(vdev, true);
-	ivpu_boot_pwr_island_trickle_drive(vdev, false);
-	ivpu_boot_pwr_island_drive(vdev, false);
-
-	return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
-}
-
 static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
 {
 	int ret;
@@ -629,34 +610,10 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
 static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
 {
 	struct ivpu_hw_info *hw = vdev->hw;
-	u32 tile_fuse;
-
-	tile_fuse = REGB_RD32(MTL_BUTTRESS_TILE_FUSE);
-	if (!REG_TEST_FLD(MTL_BUTTRESS_TILE_FUSE, VALID, tile_fuse))
-		ivpu_warn(vdev, "Tile Fuse: Invalid (0x%x)\n", tile_fuse);
-
-	hw->tile_fuse = REG_GET_FLD(MTL_BUTTRESS_TILE_FUSE, SKU, tile_fuse);
-	switch (hw->tile_fuse) {
-	case TILE_FUSE_ENABLE_LOWER:
-		hw->sku = TILE_SKU_LOWER_MTL;
-		hw->config = WP_CONFIG_1_TILE_5_3_RATIO;
-		ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Lower\n");
-		break;
-	case TILE_FUSE_ENABLE_UPPER:
-		hw->sku = TILE_SKU_UPPER_MTL;
-		hw->config = WP_CONFIG_1_TILE_4_3_RATIO;
-		ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Upper\n");
-		break;
-	case TILE_FUSE_ENABLE_BOTH:
-		hw->sku = TILE_SKU_BOTH_MTL;
-		hw->config = WP_CONFIG_2_TILE_5_3_RATIO;
-		ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Both\n");
-		break;
-	default:
-		hw->config = WP_CONFIG_0_TILE_PLL_OFF;
-		ivpu_dbg(vdev, MISC, "Tile Fuse: Disable\n");
-		break;
-	}
+	hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
+	hw->sku = TILE_SKU_BOTH_MTL;
+	hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
 
 	ivpu_pll_init_frequency_ratios(vdev);
 
@@ -797,21 +754,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
 {
 	int ret = 0;
 
-	/* FPGA requires manual clearing of IP_Reset bit by enabling quiescent state */
-	if (ivpu_is_fpga(vdev)) {
-		if (ivpu_boot_host_ss_top_noc_disable(vdev)) {
-			ivpu_err(vdev, "Failed to disable TOP NOC\n");
-			ret = -EIO;
-		}
-
-		if (ivpu_boot_host_ss_axi_disable(vdev)) {
-			ivpu_err(vdev, "Failed to disable AXI\n");
-			ret = -EIO;
-		}
-	}
-
-	if (ivpu_boot_pwr_domain_disable(vdev)) {
-		ivpu_err(vdev, "Failed to disable power domain\n");
+	if (ivpu_hw_mtl_reset(vdev)) {
+		ivpu_err(vdev, "Failed to reset the VPU\n");
 		ret = -EIO;
 	}
 
@@ -844,6 +788,19 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
 	REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
 }
 
+static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
+{
+	u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
+	u32 cpu_clock;
+
+	if ((config & 0xff) == PLL_RATIO_4_3)
+		cpu_clock = pll_clock * 2 / 4;
+	else
+		cpu_clock = pll_clock * 2 / 5;
+
+	return cpu_clock;
+}
+
 /* Register indirect accesses */
 static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
 {
@@ -855,7 +812,7 @@ static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
 	if (!ivpu_is_silicon(vdev))
 		return PLL_SIMULATION_FREQ;
 
-	return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+	return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
 }
 
 static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
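The clock-calculation fix in this file exists because the deleted PLL_RATIO_TO_FREQ() reported ratio x 50 MHz, i.e. the raw PLL output, while the reported CPU clock also depends on the ratio select in the low byte of the workpoint config. Plugging sample numbers into the same arithmetic as the new ivpu_hw_mtl_pll_to_freq() (standalone sketch, values chosen for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define PLL_REF_CLK_FREQ (50 * 1000000) /* 50 MHz reference */
	#define PLL_RATIO_4_3    0x02

	/* Same arithmetic as the new ivpu_hw_mtl_pll_to_freq(). */
	static uint32_t pll_to_freq(uint32_t ratio, uint32_t config)
	{
		uint32_t pll_clock = PLL_REF_CLK_FREQ * ratio;

		if ((config & 0xff) == PLL_RATIO_4_3)
			return pll_clock * 2 / 4;
		return pll_clock * 2 / 5;
	}

	int main(void)
	{
		/* ratio 16 -> PLL at 800 MHz; the CPU clock is 400 MHz (4/3
		 * config) or 320 MHz (5/3 config), not the 800 MHz the old
		 * macro would have reported. */
		printf("%u\n", pll_to_freq(16, 0x0202)); /* 400000000 */
		printf("%u\n", pll_to_freq(16, 0x0201)); /* 320000000 */
		return 0;
	}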
@@ -21,7 +21,7 @@ struct ivpu_bo;
 #define IVPU_IPC_ALIGNMENT	   64
 
 #define IVPU_IPC_HDR_FREE	   0
-#define IVPU_IPC_HDR_ALLOCATED	   0
+#define IVPU_IPC_HDR_ALLOCATED	   1
 
 /**
  * struct ivpu_ipc_hdr - The IPC message header structure, exchanged
@@ -489,12 +489,12 @@ unlock_reservations:
 
 int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-	int ret = 0;
 	struct ivpu_file_priv *file_priv = file->driver_priv;
 	struct ivpu_device *vdev = file_priv->vdev;
 	struct drm_ivpu_submit *params = data;
 	struct ivpu_job *job;
 	u32 *buf_handles;
+	int idx, ret;
 
 	if (params->engine > DRM_IVPU_ENGINE_COPY)
 		return -EINVAL;
@@ -523,6 +523,11 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		goto free_handles;
 	}
 
+	if (!drm_dev_enter(&vdev->drm, &idx)) {
+		ret = -ENODEV;
+		goto free_handles;
+	}
+
 	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
 		 file_priv->ctx.id, params->buffer_count);
 
@@ -530,7 +535,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (!job) {
 		ivpu_err(vdev, "Failed to create job\n");
 		ret = -ENOMEM;
-		goto free_handles;
+		goto dev_exit;
 	}
 
 	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
@@ -548,6 +553,8 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
job_put:
 	job_put(job);
+dev_exit:
+	drm_dev_exit(idx);
free_handles:
 	kfree(buf_handles);
@@ -98,12 +98,18 @@ retry:
static void ivpu_pm_recovery_work(struct work_struct *work)
 {
 	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
 	struct ivpu_device *vdev = pm->vdev;
 	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
 	int ret;
 
-	ret = pci_reset_function(to_pci_dev(vdev->drm.dev));
-	if (ret)
+retry:
+	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+	if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
+		cond_resched();
+		goto retry;
+	}
+
+	if (ret && ret != -EAGAIN)
 		ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
 
 	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
@@ -306,6 +312,11 @@ int ivpu_pm_init(struct ivpu_device *vdev)
 	return 0;
 }
 
+void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+{
+	cancel_work_sync(&vdev->pm->recovery_work);
+}
+
void ivpu_pm_enable(struct ivpu_device *vdev)
 {
 	struct device *dev = vdev->drm.dev;
@@ -21,6 +21,7 @@ struct ivpu_pm_info {
int ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_pm_enable(struct ivpu_device *vdev);
void ivpu_pm_disable(struct ivpu_device *vdev);
+void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
 
int ivpu_pm_suspend_cb(struct device *dev);
int ivpu_pm_resume_cb(struct device *dev);
@@ -981,7 +981,12 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
 */
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMD_IS_APU)
+	if ((adev->flags & AMD_IS_APU) &&
+	    adev->gfx.imu.funcs) /* Not need to do mode2 reset for IMU enabled APUs */
+		return false;
+
+	if ((adev->flags & AMD_IS_APU) &&
+	    amdgpu_acpi_is_s3_active(adev))
 		return false;
 
 	if (amdgpu_sriov_vf(adev))
@@ -212,6 +212,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
 	return false;
 }
 
+bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
+{
+	u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
+
+	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
+		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+		    IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
+			DRM_INFO("Synaptics Cascaded MST hub\n");
+			return true;
+		}
+	}
+
+	return false;
+}
+
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
 	struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -235,6 +250,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
 	    needs_dsc_aux_workaround(aconnector->dc_link))
 		aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
 
+	/* synaptics cascaded MST hub case */
+	if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+		aconnector->dsc_aux = port->mgr->aux;
+
 	if (!aconnector->dsc_aux)
 		return false;
 
@@ -662,12 +681,25 @@ struct dsc_mst_fairness_params {
 	struct amdgpu_dm_connector *aconnector;
 };
 
-static int kbps_to_peak_pbn(int kbps)
+static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+{
+	u8 link_coding_cap;
+	uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+	if (link_coding_cap == DP_128b_132b_ENCODING)
+		fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+	return fec_overhead_multiplier_x1000;
+}
+
+static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
 {
 	u64 peak_kbps = kbps;
 
 	peak_kbps *= 1006;
-	peak_kbps = div_u64(peak_kbps, 1000);
+	peak_kbps *= fec_overhead_multiplier_x1000;
+	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
 	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
 }
 
@@ -761,11 +793,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
 	int link_timeslots_used;
 	int fair_pbn_alloc;
 	int ret = 0;
+	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
 	for (i = 0; i < count; i++) {
 		if (vars[i + k].dsc_enabled) {
 			initial_slack[i] =
-			kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
+			kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
 			bpp_increased[i] = false;
 			remaining_to_increase += 1;
 		} else {
@@ -861,6 +894,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 	int next_index;
 	int remaining_to_try = 0;
 	int ret;
+	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
 	for (i = 0; i < count; i++) {
 		if (vars[i + k].dsc_enabled
@@ -890,7 +924,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 		if (next_index == -1)
 			break;
 
-		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
 		ret = drm_dp_atomic_find_time_slots(state,
 						    params[next_index].port->mgr,
 						    params[next_index].port,
@@ -903,7 +937,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 			vars[next_index].dsc_enabled = false;
 			vars[next_index].bpp_x16 = 0;
 		} else {
-			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
 			ret = drm_dp_atomic_find_time_slots(state,
 							    params[next_index].port->mgr,
 							    params[next_index].port,
@@ -932,6 +966,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	int count = 0;
 	int i, k, ret;
 	bool debugfs_overwrite = false;
+	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
 	memset(params, 0, sizeof(params));
 
@@ -993,7 +1028,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	/* Try no compression */
 	for (i = 0; i < count; i++) {
 		vars[i + k].aconnector = params[i].aconnector;
-		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
 		vars[i + k].dsc_enabled = false;
 		vars[i + k].bpp_x16 = 0;
 		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1012,7 +1047,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	/* Try max compression */
 	for (i = 0; i < count; i++) {
 		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
 			vars[i + k].dsc_enabled = true;
 			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1020,7 +1055,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 			if (ret < 0)
 				return ret;
 		} else {
-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
 			vars[i + k].dsc_enabled = false;
 			vars[i + k].bpp_x16 = 0;
 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -34,6 +34,21 @@
 #define SYNAPTICS_RC_OFFSET        0x4BC
 #define SYNAPTICS_RC_DATA          0x4C0
 
+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
+
+/**
+ * Panamera MST Hub detection
+ * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case
+ * Check from beginning of branch device vendor specific field (050Ch)
+ */
+#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
+#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
+#define SYNAPTICS_CASCADED_HUB_ID  0x5A
+#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
+
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
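Concretely, the reworked kbps_to_peak_pbn() now folds the FEC overhead into the PBN estimate: the stream rate is scaled by 1.006 (downspread margin) and by the multiplier above (1031/1000 on 8b/10b links with FEC; 1000/1000 on 128b/132b, which accounts for FEC natively), then converted into PBN units of 54/64 MB/s. A worked example of the arithmetic (standalone sketch; the stream rate is made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Round-up division, like the kernel's DIV64_U64_ROUND_UP(). */
	static uint64_t div_round_up(uint64_t n, uint64_t d)
	{
		return (n + d - 1) / d;
	}

	int main(void)
	{
		uint64_t peak_kbps = 540000; /* hypothetical 540 Mbit/s stream */
		uint16_t fec_x1000 = 1031;   /* 8b/10b link with FEC */

		peak_kbps *= 1006;           /* +0.6% downspread margin */
		peak_kbps *= fec_x1000;      /* 8b/10b FEC overhead multiplier */
		peak_kbps /= 1000 * 1000;    /* undo both x1000 scalings -> 560080 */

		/* Same final step as kbps_to_peak_pbn(). */
		printf("%llu PBN\n",
		       (unsigned long long)div_round_up(peak_kbps * 64, 54 * 8 * 1000));
		/* -> 83 PBN; without the FEC term the same stream needs only 81 */
		return 0;
	}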
@@ -146,8 +146,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
 		unsigned int order;
 		u64 root_size;
 
-		root_size = rounddown_pow_of_two(size);
-		order = ilog2(root_size) - ilog2(chunk_size);
+		order = ilog2(size) - ilog2(chunk_size);
+		root_size = chunk_size << order;
 
 		root = drm_block_alloc(mm, NULL, order, offset);
 		if (!root)
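The reordering matters on 32-bit builds: size is a u64, but rounddown_pow_of_two() works on an unsigned long, so a result above 4 GiB cannot be represented there. Deriving the order from ilog2() (which handles 64-bit values) and reconstructing root_size from chunk_size sidesteps that. Roughly (standalone sketch of the fixed computation):

	#include <stdint.h>
	#include <stdio.h>

	/* Integer log2 of a u64, safe regardless of sizeof(long). */
	static unsigned int ilog2_u64(uint64_t v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		uint64_t size = 8ULL << 30; /* 8 GiB: too big for a 32-bit long */
		uint64_t chunk_size = 4096;

		unsigned int order = ilog2_u64(size) - ilog2_u64(chunk_size); /* 33 - 12 = 21 */
		uint64_t root_size = chunk_size << order;                     /* back to 8 GiB */

		printf("order=%u root_size=%llu\n", order, (unsigned long long)root_size);
		return 0;
	}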
@@ -22,7 +22,6 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_perfmon.h"
-#include "common.xml.h"
 
 /*
  * DRM operations:
@@ -476,47 +475,7 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
 	ETNA_IOCTL(PM_QUERY_SIG,     pm_query_sig,     DRM_RENDER_ALLOW),
 };
 
-static void etnaviv_fop_show_fdinfo(struct seq_file *m, struct file *f)
-{
-	struct drm_file *file = f->private_data;
-	struct drm_device *dev = file->minor->dev;
-	struct etnaviv_drm_private *priv = dev->dev_private;
-	struct etnaviv_file_private *ctx = file->driver_priv;
-
-	/*
-	 * For a description of the text output format used here, see
-	 * Documentation/gpu/drm-usage-stats.rst.
-	 */
-	seq_printf(m, "drm-driver:\t%s\n", dev->driver->name);
-	seq_printf(m, "drm-client-id:\t%u\n", ctx->id);
-
-	for (int i = 0; i < ETNA_MAX_PIPES; i++) {
-		struct etnaviv_gpu *gpu = priv->gpu[i];
-		char engine[10] = "UNK";
-		int cur = 0;
-
-		if (!gpu)
-			continue;
-
-		if (gpu->identity.features & chipFeatures_PIPE_2D)
-			cur = snprintf(engine, sizeof(engine), "2D");
-		if (gpu->identity.features & chipFeatures_PIPE_3D)
-			cur = snprintf(engine + cur, sizeof(engine) - cur,
-				       "%s3D", cur ? "/" : "");
-		if (gpu->identity.nn_core_count > 0)
-			cur = snprintf(engine + cur, sizeof(engine) - cur,
-				       "%sNN", cur ? "/" : "");
-
-		seq_printf(m, "drm-engine-%s:\t%llu ns\n", engine,
-			   ctx->sched_entity[i].elapsed_ns);
-	}
-}
-
-static const struct file_operations fops = {
-	.owner = THIS_MODULE,
-	DRM_GEM_FOPS,
-	.show_fdinfo = etnaviv_fop_show_fdinfo,
-};
+DEFINE_DRM_GEM_FOPS(fops);
 
static const struct drm_driver etnaviv_drm_driver = {
 	.driver_features    = DRIVER_GEM | DRIVER_RENDER,
@@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
 {
-	return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+	int ret;
+
+	ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+	if (!ret) {
+		/* Drop the reference acquired by drm_gem_mmap_obj(). */
+		drm_gem_object_put(&etnaviv_obj->base);
+	}
+
+	return ret;
 }
 
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
@@ -46,6 +46,11 @@ struct intel_color_funcs {
 	 * registers involved with the same commit.
 	 */
 	void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
+	/*
+	 * Perform any extra tasks needed after all the
+	 * double buffered registers have been latched.
+	 */
+	void (*color_post_update)(const struct intel_crtc_state *crtc_state);
 	/*
 	 * Load LUTs (and other single buffered color management
 	 * registers). Will (hopefully) be called during the vblank
@@ -614,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
 
+static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+	/*
+	 * Despite Wa_1406463849, ICL no longer suffers from the SKL
+	 * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
+	 * Possibly due to the extra sticky CSC arming
+	 * (see icl_color_post_update()).
+	 *
+	 * On TGL+ all CSC arming issues have been properly fixed.
+	 */
+	icl_load_csc_matrix(crtc_state);
+}
+
+static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+	/*
+	 * Possibly related to display WA #1184, SKL CSC loses the latched
+	 * CSC coeff/offset register values if the CSC registers are disarmed
+	 * between DC5 exit and PSR exit. This will cause the plane(s) to
+	 * output all black (until CSC_MODE is rearmed and properly latched).
+	 * Once PSR exit (and proper register latching) has occurred the
+	 * danger is over. Thus when PSR is enabled the CSC coeff/offset
+	 * register programming will be peformed from skl_color_commit_arm()
+	 * which is called after PSR exit.
+	 */
+	if (!crtc_state->has_psr)
+		ilk_load_csc_matrix(crtc_state);
+}
+
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
 {
 	ilk_load_csc_matrix(crtc_state);
@@ -659,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
 	enum pipe pipe = crtc->pipe;
 	u32 val = 0;
 
+	if (crtc_state->has_psr)
+		ilk_load_csc_matrix(crtc_state);
+
 	/*
 	 * We don't (yet) allow userspace to control the pipe background color,
 	 * so force it to black, but apply pipe gamma and CSC appropriately
@@ -677,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
 			  crtc_state->csc_mode);
 }
 
+static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+
+	/*
+	 * We don't (yet) allow userspace to control the pipe background color,
+	 * so force it to black.
+	 */
+	intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
+
+	intel_de_write(i915, GAMMA_MODE(crtc->pipe),
+		       crtc_state->gamma_mode);
+
+	intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+			  crtc_state->csc_mode);
+}
+
+static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+	/*
+	 * Despite Wa_1406463849, ICL CSC is no longer disarmed by
+	 * coeff/offset register *writes*. Instead, once CSC_MODE
+	 * is armed it stays armed, even after it has been latched.
+	 * Afterwards the coeff/offset registers become effectively
+	 * self-arming. That self-arming must be disabled before the
+	 * next icl_color_commit_noarm() tries to write the next set
+	 * of coeff/offset registers. Fortunately register *reads*
+	 * do still disarm the CSC. Naturally this must not be done
+	 * until the previously written CSC registers have actually
+	 * been latched.
+	 *
+	 * TGL+ no longer need this workaround.
+	 */
+	intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
+}
+
static struct drm_property_blob *
create_linear_lut(struct drm_i915_private *i915, int lut_size)
 {
@@ -1373,6 +1446,14 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
 	i915->display.funcs.color->color_commit_arm(crtc_state);
 }
 
+void intel_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+	if (i915->display.funcs.color->color_post_update)
+		i915->display.funcs.color->color_post_update(crtc_state);
+}
+
void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -3064,10 +3145,20 @@ static const struct intel_color_funcs i9xx_color_funcs = {
 	.lut_equal = i9xx_lut_equal,
 };
 
+static const struct intel_color_funcs tgl_color_funcs = {
+	.color_check = icl_color_check,
+	.color_commit_noarm = icl_color_commit_noarm,
+	.color_commit_arm = icl_color_commit_arm,
+	.load_luts = icl_load_luts,
+	.read_luts = icl_read_luts,
+	.lut_equal = icl_lut_equal,
+};
+
static const struct intel_color_funcs icl_color_funcs = {
 	.color_check = icl_color_check,
 	.color_commit_noarm = icl_color_commit_noarm,
-	.color_commit_arm = skl_color_commit_arm,
+	.color_commit_arm = icl_color_commit_arm,
+	.color_post_update = icl_color_post_update,
 	.load_luts = icl_load_luts,
 	.read_luts = icl_read_luts,
 	.lut_equal = icl_lut_equal,
@@ -3075,7 +3166,7 @@ static const struct intel_color_funcs icl_color_funcs = {
 
static const struct intel_color_funcs glk_color_funcs = {
 	.color_check = glk_color_check,
-	.color_commit_noarm = ilk_color_commit_noarm,
+	.color_commit_noarm = skl_color_commit_noarm,
 	.color_commit_arm = skl_color_commit_arm,
 	.load_luts = glk_load_luts,
 	.read_luts = glk_read_luts,
@@ -3084,7 +3175,7 @@ static const struct intel_color_funcs glk_color_funcs = {
 
static const struct intel_color_funcs skl_color_funcs = {
 	.color_check = ivb_color_check,
-	.color_commit_noarm = ilk_color_commit_noarm,
+	.color_commit_noarm = skl_color_commit_noarm,
 	.color_commit_arm = skl_color_commit_arm,
 	.load_luts = bdw_load_luts,
 	.read_luts = bdw_read_luts,
@@ -3180,7 +3271,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
 		else
 			i915->display.funcs.color = &i9xx_color_funcs;
 	} else {
-		if (DISPLAY_VER(i915) >= 11)
+		if (DISPLAY_VER(i915) >= 12)
+			i915->display.funcs.color = &tgl_color_funcs;
+		else if (DISPLAY_VER(i915) == 11)
 			i915->display.funcs.color = &icl_color_funcs;
 		else if (DISPLAY_VER(i915) == 10)
 			i915->display.funcs.color = &glk_color_funcs;
@@ -21,6 +21,7 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_post_update(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
@@ -1209,6 +1209,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
 	if (needs_cursorclk_wa(old_crtc_state) &&
 	    !needs_cursorclk_wa(new_crtc_state))
 		icl_wa_cursorclkgating(dev_priv, pipe, false);
+
+	if (intel_crtc_needs_color_update(new_crtc_state))
+		intel_color_post_update(new_crtc_state);
 }
 
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -7091,6 +7094,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
 
 	intel_fbc_update(state, crtc);
 
+	drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+
 	if (!modeset &&
 	    intel_crtc_needs_color_update(new_crtc_state))
 		intel_color_commit_noarm(new_crtc_state);
@@ -7458,8 +7463,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	drm_atomic_helper_wait_for_dependencies(&state->base);
 	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
 
-	if (state->modeset)
-		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+	/*
+	 * During full modesets we write a lot of registers, wait
+	 * for PLLs, etc. Doing that while DC states are enabled
+	 * is not a good idea.
+	 *
+	 * During fastsets and other updates we also need to
+	 * disable DC states due to the following scenario:
+	 * 1. DC5 exit and PSR exit happen
+	 * 2. Some or all _noarm() registers are written
+	 * 3. Due to some long delay PSR is re-entered
+	 * 4. DC5 entry -> DMC saves the already written new
+	 *    _noarm() registers and the old not yet written
+	 *    _arm() registers
+	 * 5. DC5 exit -> DMC restores a mixture of old and
+	 *    new register values and arms the update
+	 * 6. PSR exit -> hardware latches a mixture of old and
+	 *    new register values -> corrupted frame, or worse
+	 * 7. New _arm() registers are finally written
+	 * 8. Hardware finally latches a complete set of new
+	 *    register values, and subsequent frames will be OK again
+	 */
+	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
 
 	intel_atomic_prepare_plane_clear_colors(state);
 
@@ -7608,8 +7633,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		 * the culprit.
 		 */
 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
-		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
 	}
+	intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
 
 	/*
@@ -301,6 +301,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
 	vm->pte_encode = gen8_ggtt_pte_encode;
 
 	dpt->obj = dpt_obj;
+	dpt->obj->is_dpt = true;
 
 	return &dpt->vm;
 }
@@ -309,5 +310,6 @@ void intel_dpt_destroy(struct i915_address_space *vm)
 {
 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
 
+	dpt->obj->is_dpt = false;
 	i915_vm_put(&dpt->vm);
 }
@@ -418,9 +418,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
 	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
 	if (val == 0xffffffff) {
 		drm_dbg_kms(&i915->drm,
-			    "Port %s: PHY in TCCOLD, assume safe mode\n",
+			    "Port %s: PHY in TCCOLD, assume not owned\n",
 			    dig_port->tc_port_name);
-		return true;
+		return false;
 	}
 
 	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
@@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
 
 	memcpy(map, data, size);
 
-	i915_gem_object_unpin_map(obj);
+	i915_gem_object_flush_map(obj);
+	__i915_gem_object_release_map(obj);
 
 	return obj;
 }
@@ -303,7 +303,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
-	return READ_ONCE(obj->frontbuffer);
+	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
 }
 
static inline unsigned int
@@ -491,6 +491,9 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int cache_dirty:1;
 
+	/* @is_dpt: Object houses a display page table (DPT) */
+	unsigned int is_dpt:1;
+
 	/**
 	 * @read_domains: Read memory domains.
 	 *
@@ -2075,16 +2075,6 @@ void intel_rps_sanitize(struct intel_rps *rps)
 		rps_disable_interrupts(rps);
 }
 
-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps)
-{
-	struct drm_i915_private *i915 = rps_to_i915(rps);
-	i915_reg_t rpstat;
-
-	rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;
-
-	return intel_uncore_read_fw(rps_to_gt(rps)->uncore, rpstat);
-}
-
u32 intel_rps_read_rpstat(struct intel_rps *rps)
 {
 	struct drm_i915_private *i915 = rps_to_i915(rps);
@@ -2095,7 +2085,7 @@ u32 intel_rps_read_rpstat(struct intel_rps *rps)
 	return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
 }
 
-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
+static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
 {
 	struct drm_i915_private *i915 = rps_to_i915(rps);
 	u32 cagf;
@@ -2118,10 +2108,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
 	return cagf;
 }
 
-static u32 read_cagf(struct intel_rps *rps)
+static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
 {
 	struct drm_i915_private *i915 = rps_to_i915(rps);
 	struct intel_uncore *uncore = rps_to_uncore(rps);
+	i915_reg_t r = INVALID_MMIO_REG;
 	u32 freq;
 
 	/*
@@ -2129,22 +2120,30 @@ static u32 read_cagf(struct intel_rps *rps)
 	 * registers will return 0 freq when GT is in RC6
 	 */
 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
-		freq = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
+		r = MTL_MIRROR_TARGET_WP1;
 	} else if (GRAPHICS_VER(i915) >= 12) {
-		freq = intel_uncore_read(uncore, GEN12_RPSTAT1);
+		r = GEN12_RPSTAT1;
 	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
 		vlv_punit_get(i915);
 		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
 		vlv_punit_put(i915);
 	} else if (GRAPHICS_VER(i915) >= 6) {
-		freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
+		r = GEN6_RPSTAT1;
 	} else {
-		freq = intel_uncore_read(uncore, MEMSTAT_ILK);
+		r = MEMSTAT_ILK;
 	}
 
+	if (i915_mmio_reg_valid(r))
+		freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r);
+
 	return intel_rps_get_cagf(rps, freq);
 }
 
+static u32 read_cagf(struct intel_rps *rps)
+{
+	return __read_cagf(rps, true);
+}
+
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 {
 	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
@@ -2157,7 +2156,12 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 	return freq;
 }
 
-u32 intel_rps_read_punit_req(struct intel_rps *rps)
+u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
+{
+	return intel_gpu_freq(rps, __read_cagf(rps, false));
+}
+
+static u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
 	struct intel_uncore *uncore = rps_to_uncore(rps);
 	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
@@ -37,8 +37,8 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
 
int intel_gpu_freq(struct intel_rps *rps, int val);
int intel_freq_opcode(struct intel_rps *rps, int val);
-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
+u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
@@ -49,10 +49,8 @@ int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
-u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
u32 intel_rps_read_rpstat(struct intel_rps *rps);
-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps);
void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps);
void intel_rps_raise_unslice(struct intel_rps *rps);
void intel_rps_lower_unslice(struct intel_rps *rps);
@@ -1592,9 +1592,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 	/*
 	 * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
 	 */
-	if (intel_uc_uses_guc_rc(&gt->uc) &&
-	    (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
-	     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)))
+	if (stream->override_gucrc)
 		drm_WARN_ON(&gt->i915->drm,
 			    intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc));
 
@@ -3305,8 +3303,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		if (ret) {
 			drm_dbg(&stream->perf->i915->drm,
 				"Unable to override gucrc mode\n");
-			goto err_config;
+			goto err_gucrc;
 		}
+
+		stream->override_gucrc = true;
 	}
 
 	ret = alloc_oa_buffer(stream);
@@ -3345,11 +3345,15 @@ err_enable:
 	free_oa_buffer(stream);
 
err_oa_buf_alloc:
-	free_oa_configs(stream);
+	if (stream->override_gucrc)
+		intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc);
 
+err_gucrc:
 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
 	intel_engine_pm_put(stream->engine);
 
+	free_oa_configs(stream);
+
err_config:
 	free_noa_wait(stream);
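The reshuffled error labels above restore the usual kernel unwind discipline: each label undoes exactly the resources acquired before the failing step, in reverse order, so a GuC RC override failure now releases the forcewake and engine-pm references it previously leaked. The generic shape of the idiom (illustrative sketch with hypothetical step functions, not i915 code):

	/* Hypothetical helpers standing in for the real i915 steps. */
	int acquire_wakerefs(void);
	void release_wakerefs(void);
	int override_gucrc_mode(void);
	void restore_gucrc_mode(void);
	int alloc_buffers(void);

	int stream_init(void)
	{
		int ret;

		ret = acquire_wakerefs();    /* cf. forcewake + engine_pm */
		if (ret)
			return ret;

		ret = override_gucrc_mode(); /* cf. GuC RC override */
		if (ret)
			goto err_wakerefs;   /* nothing else to undo yet */

		ret = alloc_buffers();       /* cf. OA buffer allocation */
		if (ret)
			goto err_gucrc;      /* undo the override first */

		return 0;

	err_gucrc:
		restore_gucrc_mode();
	err_wakerefs:
		release_wakerefs();
		return ret;
	}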
@@ -316,6 +316,12 @@ struct i915_perf_stream {
 	 * buffer should be checked for available data.
 	 */
 	u64 poll_oa_period;
+
+	/**
+	 * @override_gucrc: GuC RC has been overridden for the perf stream,
+	 * and we need to restore the default configuration on release.
+	 */
+	bool override_gucrc;
 };
 
/**
@@ -393,14 +393,12 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 		 * case we assume the system is running at the intended
 		 * frequency. Fortunately, the read should rarely fail!
 		 */
-		val = intel_rps_read_rpstat_fw(rps);
-		if (val)
-			val = intel_rps_get_cagf(rps, val);
-		else
-			val = rps->cur_freq;
+		val = intel_rps_read_actual_frequency_fw(rps);
+		if (!val)
+			val = intel_gpu_freq(rps, rps->cur_freq);
 
 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
-				intel_gpu_freq(rps, val), period_ns / 1000);
+				val, period_ns / 1000);
 	}
 
 	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
@@ -33,6 +33,7 @@
 #include <linux/apple-gmux.h>
 #include <linux/backlight.h>
 #include <linux/idr.h>
+#include <drm/drm_probe_helper.h>
 
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
@@ -299,8 +300,12 @@ nv50_backlight_init(struct nouveau_backlight *bl,
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
 
+	/*
+	 * Note when this runs the connectors have not been probed yet,
+	 * so nv_conn->base.status is not set yet.
+	 */
 	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
-	    nv_conn->base.status != connector_status_connected)
+	    drm_helper_probe_detect(&nv_conn->base, NULL, false) != connector_status_connected)
 		return -ENODEV;
 
 	if (nv_conn->type == DCB_CONNECTOR_eDP) {
@@ -906,12 +906,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	spin_unlock(&sched->job_list_lock);
 
-	if (job) {
-		job->entity->elapsed_ns += ktime_to_ns(
-			ktime_sub(job->s_fence->finished.timestamp,
-				  job->s_fence->scheduled.timestamp));
-	}
-
 	return job;
 }
@@ -89,7 +89,8 @@ static int check_block(struct kunit *test, struct drm_buddy *mm,
 		err = -EINVAL;
 	}
 
-	if (!is_power_of_2(block_size)) {
+	/* We can't use is_power_of_2() for a u64 on 32-bit systems. */
+	if (block_size & (block_size - 1)) {
 		kunit_err(test, "block size not power of two\n");
 		err = -EINVAL;
 	}
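The replacement test relies on the classic bit trick: for nonzero x, x & (x - 1) clears the lowest set bit, so the result is zero exactly when x is a power of two. Unlike is_power_of_2(), which takes an unsigned long and therefore truncates a u64 on 32-bit targets, the open-coded form is width-safe. A standalone sketch:

	#include <assert.h>
	#include <stdint.h>

	/* Nonzero iff x is NOT a power of two; valid for the full u64 range. */
	static int not_pow2(uint64_t x)
	{
		return (x & (x - 1)) != 0;
	}

	int main(void)
	{
		assert(!not_pow2(1ULL << 32));         /* 4 GiB: a power of two */
		assert(not_pow2((1ULL << 32) + 4096)); /* mixed bits: not one */
		/* Truncated to 32 bits, 1ULL << 32 would become 0 and misclassify. */
		return 0;
	}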
@@ -228,13 +228,6 @@ struct drm_sched_entity {
 	 */
 	struct rb_node			rb_tree_node;
 
-	/**
-	 * @elapsed_ns:
-	 *
-	 * Records the amount of time where jobs from this entity were active
-	 * on the GPU.
-	 */
-	uint64_t elapsed_ns;
};
 
/**