drm/xe/pm: Toggle d3cold_allowed using vram_usages

Add support to control d3cold by using the vram_usages metric from the
TTM resource manager.

When the root port is capable of d3cold but xe has disallowed d3cold
because vram_usages is above the vram_d3cold_threshold, d3cold must be
disabled on the root port to avoid any resume failure: the root port can
still transition to d3cold once all of the PCIe endpoints and
{upstream, virtual} switch ports have transitioned to d3hot.

Also clean up the TODO code comment.

v2:
- Modify d3cold.allowed in xe_pm_d3cold_allowed_toggle. [Riana]
- Condition changed to (total_vram_used_mb < xe->d3cold.vram_threshold)
  according to the doc comment.
v3:
- Added an enum instead of a true/false argument in d3cold_toggle(). [Rodrigo]
- Removed TODO comment. [Rodrigo]

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Reviewed-by: Badal Nilawar <badal.nilawar@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230718080703.239343-5-anshuman.gupta@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit 2ef08b9802 (parent b2d756199b)
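For readers skimming the diff, here is a condensed sketch (in kernel C, not part of the patch) of the two pieces the commit message describes. The sketch_* function names and the vram_used_mb parameter are illustrative stand-ins; the d3cold fields and the PCI helpers are the ones actually used in the hunks below.

/*
 * Condensed sketch only -- see the diff below for the real implementation
 * and the TTM-based VRAM accounting.
 */
#include <linux/mutex.h>
#include <linux/pci.h>

#include "xe_device.h"

/*
 * Allow d3cold only while VRAM usage stays below the configured threshold,
 * so that the VRAM save/restore latency at runtime resume stays acceptable.
 * vram_used_mb stands in for the TTM resource-manager accounting done in
 * xe_pm_d3cold_allowed_toggle() below.
 */
static void sketch_d3cold_allowed(struct xe_device *xe, u32 vram_used_mb)
{
	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.allowed = vram_used_mb < xe->d3cold.vram_threshold;
	mutex_unlock(&xe->d3cold.lock);
}

/*
 * Even when xe itself only enters D3hot, the root port may still drop to
 * d3cold once every endpoint and {upstream, virtual} switch port below it
 * is in D3hot. Disabling d3cold on the root port (as d3cold_toggle() does
 * below) prevents that while xe cannot tolerate losing power.
 */
static void sketch_root_port_d3cold(struct pci_dev *pdev, bool allow)
{
	struct pci_dev *root_pdev = pcie_find_root_port(pdev);

	if (!root_pdev)
		return;

	if (allow)
		pci_d3cold_enable(root_pdev);
	else
		pci_d3cold_disable(root_pdev);
}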
drivers/gpu/drm/xe/xe_pci.c

@@ -25,6 +25,11 @@
 #include "xe_pm.h"
 #include "xe_step.h"
 
+enum toggle_d3cold {
+	D3COLD_DISABLE,
+	D3COLD_ENABLE,
+};
+
 struct xe_subplatform_desc {
 	enum xe_subplatform subplatform;
 	const char *name;
@@ -726,6 +731,28 @@ static int xe_pci_resume(struct device *dev)
 	return 0;
 }
 
+static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
+{
+	struct xe_device *xe = pdev_to_xe_device(pdev);
+	struct pci_dev *root_pdev;
+
+	if (!xe->d3cold.capable)
+		return;
+
+	root_pdev = pcie_find_root_port(pdev);
+	if (!root_pdev)
+		return;
+
+	switch (toggle) {
+	case D3COLD_DISABLE:
+		pci_d3cold_disable(root_pdev);
+		break;
+	case D3COLD_ENABLE:
+		pci_d3cold_enable(root_pdev);
+		break;
+	}
+}
+
 static int xe_pci_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -743,6 +770,7 @@ static int xe_pci_runtime_suspend(struct device *dev)
 		pci_ignore_hotplug(pdev);
 		pci_set_power_state(pdev, PCI_D3cold);
 	} else {
+		d3cold_toggle(pdev, D3COLD_DISABLE);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
@@ -767,6 +795,8 @@ static int xe_pci_runtime_resume(struct device *dev)
 			return err;
 
 		pci_set_master(pdev);
+	} else {
+		d3cold_toggle(pdev, D3COLD_ENABLE);
 	}
 
 	return xe_pm_runtime_resume(xe);
@@ -780,16 +810,15 @@ static int xe_pci_runtime_idle(struct device *dev)
 	if (!xe->d3cold.capable) {
 		xe->d3cold.allowed = false;
 	} else {
+		xe_pm_d3cold_allowed_toggle(xe);
+
 		/*
 		 * TODO: d3cold should be allowed (true) if
 		 * (IS_DGFX(xe) && !xe_device_mem_access_ongoing(xe))
 		 * but maybe include some other conditions. So, before
 		 * we can re-enable the D3cold, we need to:
 		 * 1. rewrite the VRAM save / restore to avoid buffer object locks
-		 * 2. block D3cold if we have a big amount of device memory in use
-		 *    in order to reduce the latency.
-		 * 3. at resume, detect if we really lost power and avoid memory
+		 * 2. at resume, detect if we really lost power and avoid memory
 		 *    restoration if we were only up to d3cold
 		 */
-		xe->d3cold.allowed = false;
 	}
drivers/gpu/drm/xe/xe_pm.c

@@ -280,3 +280,28 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
 
 	return 0;
 }
+
+void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
+{
+	struct ttm_resource_manager *man;
+	u32 total_vram_used_mb = 0;
+	u64 vram_used;
+	int i;
+
+	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
+		man = ttm_manager_type(&xe->ttm, i);
+		if (man) {
+			vram_used = ttm_resource_manager_usage(man);
+			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
+		}
+	}
+
+	mutex_lock(&xe->d3cold.lock);
+
+	if (total_vram_used_mb < xe->d3cold.vram_threshold)
+		xe->d3cold.allowed = true;
+	else
+		xe->d3cold.allowed = false;
+
+	mutex_unlock(&xe->d3cold.lock);
+}
drivers/gpu/drm/xe/xe_pm.h

@@ -25,5 +25,6 @@ bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe);
 int xe_pm_runtime_get_if_active(struct xe_device *xe);
 void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
+void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
 
 #endif
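Read together with the xe_pci.c hunks, the intended call pattern is: set a VRAM threshold once, then let the runtime-idle path re-evaluate d3cold on every idle transition. The sketch below is illustrative only; the example_* functions and the 300 MiB value are assumptions, not part of the patch or a documented default.

/* Hypothetical usage sketch of the helpers declared above in xe_pm.h. */
#include "xe_pm.h"

/*
 * One-time policy setup: cap d3cold by VRAM usage. 300 MiB is an arbitrary
 * example value, not a driver default.
 */
static int example_configure_d3cold(struct xe_device *xe)
{
	return xe_pm_set_vram_threshold(xe, 300);
}

/*
 * Called from the runtime-idle path (see the xe_pci_runtime_idle() hunk):
 * d3cold.allowed ends up true only while total VRAM usage stays below the
 * configured threshold, so a heavily used device avoids the VRAM
 * save/restore latency by staying in d3hot.
 */
static void example_runtime_idle(struct xe_device *xe)
{
	xe_pm_d3cold_allowed_toggle(xe);
}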