drm fixes for 6.8-rc5

Merge tag 'drm-fixes-2024-02-16' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular weekly fixes, nothing too major, mostly amdgpu, then i915,
  xe, msm and nouveau with some scattered bits elsewhere.

  crtc:
   - fix uninit variable

  prime:
   - support > 4GB page arrays

  buddy:
   - fix error handling in allocations

  i915:
   - fix blank screen on JSL chromebooks
   - stable fix to limit DP SST link rates

  xe:
   - fix an out-of-bounds shift
   - fix the display code thinking xe uses shmem
   - fix a warning about index out-of-bounds
   - fix a clang-16 compilation warning

  amdgpu:
   - PSR fixes
   - suspend/resume fixes
   - link training fix
   - aspect ratio fix
   - DCN 3.5 fixes
   - VCN 4.x fix
   - GFX 11 fix
   - misc display fixes
   - misc small fixes

  amdkfd:
   - cache size reporting fix
   - SIMD distribution fix

  msm:
   - GPU:
      - dmabuf vmap fix
      - a610 UBWC corruption fix (incorrect hbb)
      - revert a commit that was making GPU recovery unreliable
      - TLB invalidation fix

  ivpu:
   - suspend/resume fix

  nouveau:
   - fix scheduler cleanup path
   - fix pointless scheduler creation
   - fix kvcalloc argument order

  rockchip:
   - vop2 locking fix"

* tag 'drm-fixes-2024-02-16' of git://anongit.freedesktop.org/drm/drm: (38 commits)
  drm/amdgpu: Fix implicit assumtion in gfx11 debug flags
  drm/amdkfd: update SIMD distribution algo for GFXIP 9.4.2 onwards
  drm/amd/display: Increase ips2_eval delay for DCN35
  drm/amdgpu/display: Initialize gamma correction mode variable in dcn30_get_gamcor_current()
  drm/amdgpu/soc21: update VCN 4 max HEVC encoding resolution
  drm/amd/display: fixed integer types and null check locations
  drm/amd/display: Fix array-index-out-of-bounds in dcn35_clkmgr
  drm/amd/display: Preserve original aspect ratio in create stream
  drm/amd/display: Fix possible NULL dereference on device remove/driver unload
  Revert "drm/amd/display: increased min_dcfclk_mhz and min_fclk_mhz"
  drm/amd/display: Add align done check
  Revert "drm/amd: flush any delayed gfxoff on suspend entry"
  drm/amd: Stop evicting resources on APUs in suspend
  drm/amd/display: Fix possible buffer overflow in 'find_dcfclk_for_voltage()'
  drm/amd/display: Fix possible use of uninitialized 'max_chunks_fbc_mode' in 'calculate_bandwidth()'
  drm/amd/display: Initialize 'wait_time_microsec' variable in link_dp_training_dpia.c
  drm/amd/display: Fix && vs || typos
  drm/amdkfd: Fix L2 cache size reporting in GFX9.4.3
  drm/amdgpu: make damage clips support configurable
  drm/msm: Wire up tlb ops
  ...
commit ca6a62f9fe
@@ -510,16 +510,6 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
    return ret;
}

static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
{
    ivpu_boot_dpu_active_drive(vdev, false);
    ivpu_boot_pwr_island_isolation_drive(vdev, true);
    ivpu_boot_pwr_island_trickle_drive(vdev, false);
    ivpu_boot_pwr_island_drive(vdev, false);

    return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
}

static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
    u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
@@ -616,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
    return 0;
}

static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
{
    int ret;
    u32 val;

    if (IVPU_WA(punit_disabled))
        return 0;

    ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
    if (ret) {
        ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
        return ret;
    }

    val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
    val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
    REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);

    ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
    if (ret)
        ivpu_err(vdev, "Timed out waiting for RESET completion\n");

    return ret;
}

static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
    int ret = 0;

    if (ivpu_boot_pwr_domain_disable(vdev)) {
        ivpu_err(vdev, "Failed to disable power domain\n");
    if (ivpu_hw_37xx_ip_reset(vdev)) {
        ivpu_err(vdev, "Failed to reset NPU\n");
        ret = -EIO;
    }

@@ -661,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
    int ret;

    /* PLL requests may fail when powering down, so issue WP 0 here */
    ret = ivpu_pll_disable(vdev);
    if (ret)
        ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);

    ret = ivpu_hw_37xx_d0i3_disable(vdev);
    if (ret)
        ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

@@ -58,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
    int ret;

    /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
    pci_save_state(to_pci_dev(vdev->drm.dev));

    ret = ivpu_shutdown(vdev);
    if (ret) {
    if (ret)
        ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
        return ret;
    }

    pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

    return ret;
}
@@ -71,6 +74,9 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
    int ret;

    pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
    pci_restore_state(to_pci_dev(vdev->drm.dev));

retry:
    ret = ivpu_hw_power_up(vdev);
    if (ret) {
@@ -120,15 +126,20 @@ static void ivpu_pm_recovery_work(struct work_struct *work)

    ivpu_fw_log_dump(vdev);

retry:
    ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
    if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
        cond_resched();
        goto retry;
    }
    atomic_inc(&vdev->pm->reset_counter);
    atomic_set(&vdev->pm->reset_pending, 1);
    down_write(&vdev->pm->reset_lock);

    if (ret && ret != -EAGAIN)
        ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
    ivpu_suspend(vdev);
    ivpu_pm_prepare_cold_boot(vdev);
    ivpu_jobs_abort_all(vdev);

    ret = ivpu_resume(vdev);
    if (ret)
        ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);

    up_write(&vdev->pm->reset_lock);
    atomic_set(&vdev->pm->reset_pending, 0);

    kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
    pm_runtime_mark_last_busy(vdev->drm.dev);
@@ -200,9 +211,6 @@ int ivpu_pm_suspend_cb(struct device *dev)
    ivpu_suspend(vdev);
    ivpu_pm_prepare_warm_boot(vdev);

    pci_save_state(to_pci_dev(dev));
    pci_set_power_state(to_pci_dev(dev), PCI_D3hot);

    ivpu_dbg(vdev, PM, "Suspend done.\n");

    return 0;
@@ -216,9 +224,6 @@ int ivpu_pm_resume_cb(struct device *dev)

    ivpu_dbg(vdev, PM, "Resume..\n");

    pci_set_power_state(to_pci_dev(dev), PCI_D0);
    pci_restore_state(to_pci_dev(dev));

    ret = ivpu_resume(vdev);
    if (ret)
        ivpu_err(vdev, "Failed to resume: %d\n", ret);
@@ -200,6 +200,7 @@ extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
@@ -1549,9 +1550,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif

#if defined(CONFIG_DRM_AMD_DC)
@@ -1519,4 +1519,19 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}

/**
 * amdgpu_choose_low_power_state
 *
 * @adev: amdgpu_device_pointer
 *
 * Choose the target low power state for the GPU
 */
void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
{
    if (amdgpu_acpi_is_s0ix_active(adev))
        adev->in_s0ix = true;
    else if (amdgpu_acpi_is_s3_active(adev))
        adev->in_s3 = true;
}

#endif /* CONFIG_SUSPEND */
@@ -4514,13 +4514,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
    struct amdgpu_device *adev = drm_to_adev(dev);
    int i, r;

    amdgpu_choose_low_power_state(adev);

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
        return 0;

    /* Evict the majority of BOs before starting suspend sequence */
    r = amdgpu_device_evict_resources(adev);
    if (r)
        return r;
        goto unprepare;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
@@ -4529,10 +4531,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
            continue;
        r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
        if (r)
            return r;
            goto unprepare;
    }

    return 0;

unprepare:
    adev->in_s0ix = adev->in_s3 = false;

    return r;
}

/**
@@ -4569,7 +4576,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
    drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);

    cancel_delayed_work_sync(&adev->delayed_init_work);
    flush_delayed_work(&adev->gfx.gfx_off_delay_work);

    amdgpu_ras_suspend(adev);
@@ -211,6 +211,7 @@ int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */

static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);

@@ -859,6 +860,18 @@ int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
module_param_named(backlight, amdgpu_backlight, bint, 0444);

/**
 * DOC: damageclips (int)
 * Enable or disable damage clips support. If damage clips support is disabled,
 * we will force full frame updates, irrespective of what user space sends to
 * us.
 *
 * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
 */
MODULE_PARM_DESC(damageclips,
                 "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
module_param_named(damageclips, amdgpu_damage_clips, int, 0444);

/**
 * DOC: tmz (int)
 * Trusted Memory Zone (TMZ) is a method to protect data being written
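With module_param_named(damageclips, ..., 0444) as above, the new knob behaves like any other read-only amdgpu module parameter: it can be set at load time with `modprobe amdgpu damageclips=0` or on the kernel command line as `amdgpu.damageclips=0`, and the active value is readable from /sys/module/amdgpu/parameters/damageclips but, with those permissions, not writable at runtime.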
@@ -723,8 +723,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)

        if (adev->gfx.gfx_off_req_count == 0 &&
            !adev->gfx.gfx_off_state) {
            schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
            /* If going to s2idle, no need to wait */
            if (adev->in_s0ix) {
                if (!amdgpu_dpm_set_powergating_by_smu(adev,
                        AMD_IP_BLOCK_TYPE_GFX, true))
                    adev->gfx.gfx_off_state = true;
            } else {
                schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
                                      delay);
            }
        }
    } else {
        if (adev->gfx.gfx_off_req_count == 0) {
@@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = {
@@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
    m = get_mqd(mqd);

    if (has_wa_flag) {
        uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
                0xffff : 0xffffffff;
        uint32_t wa_mask =
            (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;

        m->compute_static_thread_mgmt_se0 = wa_mask;
        m->compute_static_thread_mgmt_se1 = wa_mask;
@@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
    update_cu_mask(mm, mqd, minfo, 0);
    set_priority(m, q);

    if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
        if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
            m->compute_resource_limits |=
                COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
        else
            m->compute_resource_limits &=
                ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
    }

    q->is_active = QUEUE_IS_ACTIVE(*q);
}

@@ -532,6 +532,7 @@ struct queue_properties {
enum mqd_update_flag {
    UPDATE_FLAG_DBG_WA_ENABLE = 1,
    UPDATE_FLAG_DBG_WA_DISABLE = 2,
    UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};

struct mqd_update_info {
@@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
                void *gws)
{
    struct mqd_update_info minfo = {0};
    struct kfd_node *dev = NULL;
    struct process_queue_node *pqn;
    struct kfd_process_device *pdd;
@@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
    }

    pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
    minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;

    return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
                                                 pqn->q, NULL);
                                                 pqn->q, &minfo);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
@@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
    else
        mode = UNKNOWN_MEMORY_PARTITION_MODE;

    if (pcache->cache_level == 2)
        pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
    else if (mode)
        pcache->cache_size = pcache_info[cache_type].cache_size / mode;
    else
        pcache->cache_size = pcache_info[cache_type].cache_size;
    pcache->cache_size = pcache_info[cache_type].cache_size;
    /* Partition mode only affects L3 cache size */
    if (mode && pcache->cache_level == 3)
        pcache->cache_size /= mode;

    if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
        pcache->cache_type |= HSA_CACHE_TYPE_DATA;
@@ -1956,7 +1956,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
            &adev->dm.dmub_bo_gpu_addr,
            &adev->dm.dmub_bo_cpu_addr);

    if (adev->dm.hpd_rx_offload_wq) {
    if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
        for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
            if (adev->dm.hpd_rx_offload_wq[i].wq) {
                destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
@@ -5219,6 +5219,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
                                struct drm_plane_state *new_plane_state,
                                struct drm_crtc_state *crtc_state,
                                struct dc_flip_addrs *flip_addrs,
                                bool is_psr_su,
                                bool *dirty_regions_changed)
{
    struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
@@ -5243,6 +5244,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
    num_clips = drm_plane_get_damage_clips_count(new_plane_state);
    clips = drm_plane_get_damage_clips(new_plane_state);

    if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
                                               is_psr_su)))
        goto ffu;

    if (!dm_crtc_state->mpo_requested) {
        if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
            goto ffu;
@@ -6194,7 +6199,9 @@ create_stream_for_sink(struct drm_connector *connector,
    if (recalculate_timing) {
        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
        drm_mode_copy(&saved_mode, &mode);
        saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
        drm_mode_copy(&mode, freesync_mode);
        mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
    } else {
        decide_crtc_timing_for_drm_display_mode(
            &mode, preferred_mode, scale);
@@ -8298,6 +8305,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
            fill_dc_dirty_rects(plane, old_plane_state,
                                new_plane_state, new_crtc_state,
                                &bundle->flip_addrs[planes_count],
                                acrtc_state->stream->link->psr_settings.psr_version ==
                                DC_PSR_VERSION_SU_1,
                                &dirty_rects_changed);

            /*
@@ -94,7 +94,7 @@ static void calculate_bandwidth(
    const uint32_t s_high = 7;
    const uint32_t dmif_chunk_buff_margin = 1;

    uint32_t max_chunks_fbc_mode;
    uint32_t max_chunks_fbc_mode = 0;
    int32_t num_cursor_lines;

    int32_t i, j, k;
@@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2(
        /* Vega12 */
        smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
                                  DATA_TABLES(smu_info));
        DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
        if (!smu_info_v3_2)
            return BP_RESULT_BADBIOSTABLE;

        DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);

        info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
    } else if (revision.minor == 3) {
        /* Vega20 */
        smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
                                  DATA_TABLES(smu_info));
        DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
        if (!smu_info_v3_3)
            return BP_RESULT_BADBIOSTABLE;

        DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);

        info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
    }

@@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11(
    info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
                         DATA_TABLES(integratedsysteminfo));

    DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
    if (info_v11 == NULL)
        return BP_RESULT_BADBIOSTABLE;

    DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);

    info->gpu_cap_info =
        le32_to_cpu(info_v11->gpucapinfo);
    /*
@@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1(

    info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
                          DATA_TABLES(integratedsysteminfo));
    DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);

    if (info_v2_1 == NULL)
        return BP_RESULT_BADBIOSTABLE;

    DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);

    info->gpu_cap_info =
        le32_to_cpu(info_v2_1->gpucapinfo);
    /*
@@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2(
    info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
                          DATA_TABLES(integratedsysteminfo));

    DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);

    if (info_v2_2 == NULL)
        return BP_RESULT_BADBIOSTABLE;

    DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);

    info->gpu_cap_info =
        le32_to_cpu(info_v2_2->gpucapinfo);
    /*
@@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
    int i;

    for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
        if (i >= VG_NUM_DCFCLK_DPM_LEVELS)
            break;
        if (clock_table->SocVoltage[i] == voltage)
            return clock_table->DcfClocks[i];
    }
@@ -655,10 +655,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
    struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
    uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
    uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
    uint32_t num_memps, num_fclk, num_dcfclk;
    int i;

    /* Determine min/max p-state values. */
    for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
    num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
        clock_table->NumMemPstatesEnabled;
    for (i = 0; i < num_memps; i++) {
        uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

        if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
@@ -670,7 +673,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
    min_dram_speed_mts = max_dram_speed_mts;
    min_pstate = max_pstate;

    for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
    for (i = 0; i < num_memps; i++) {
        uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

        if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
@@ -699,9 +702,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
    /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
    ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);

    max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
    num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
        clock_table->NumFclkLevelsEnabled;
    max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);

    for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
    num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
        clock_table->NumDcfClkLevelsEnabled;
    for (i = 0; i < num_dcfclk; i++) {
        int j;

        /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
@@ -56,16 +56,13 @@ static void dpp3_enable_cm_block(

static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
    enum dc_lut_mode mode;
    enum dc_lut_mode mode = LUT_BYPASS;
    uint32_t state_mode;
    uint32_t lut_mode;
    struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

    REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);

    if (state_mode == 0)
        mode = LUT_BYPASS;

    if (state_mode == 2) {//Programmable RAM LUT
        REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
        if (lut_mode == 0)
@@ -2760,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
    struct _vcs_dpi_voltage_scaling_st entry = {0};
    struct clk_limit_table_entry max_clk_data = {0};

    unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
    unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;

    static const unsigned int num_dcfclk_stas = 5;
    unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
|
||||
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
|
||||
uint32_t otg_inst;
|
||||
|
||||
if (!abm && !tg && !panel_cntl)
|
||||
if (!abm || !tg || !panel_cntl)
|
||||
return;
|
||||
|
||||
otg_inst = tg->inst;
|
||||
@ -245,7 +245,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
|
||||
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
|
||||
uint32_t otg_inst;
|
||||
|
||||
if (!abm && !tg && !panel_cntl)
|
||||
if (!abm || !tg || !panel_cntl)
|
||||
return false;
|
||||
|
||||
otg_inst = tg->inst;
|
||||
|
@ -361,7 +361,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
|
||||
struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
|
||||
int num_dpias = 0;
|
||||
|
||||
for (uint8_t i = 0; i < num_streams; ++i) {
|
||||
for (unsigned int i = 0; i < num_streams; ++i) {
|
||||
if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
|
||||
/* new dpia sst stream, check whether it exceeds max dpia */
|
||||
if (num_dpias >= MAX_DPIA_NUM)
|
||||
|
@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status(
|
||||
{
|
||||
enum link_training_result status = LINK_TRAINING_SUCCESS;
|
||||
union lane_status lane_status;
|
||||
union lane_align_status_updated dpcd_lane_status_updated;
|
||||
uint8_t dpcd_buf[6] = {0};
|
||||
uint32_t lane;
|
||||
|
||||
@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status(
|
||||
* check lanes status
|
||||
*/
|
||||
lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
|
||||
dpcd_lane_status_updated.raw = dpcd_buf[4];
|
||||
|
||||
if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
|
||||
!lane_status.bits.CR_DONE_0 ||
|
||||
!lane_status.bits.SYMBOL_LOCKED_0) {
|
||||
!lane_status.bits.SYMBOL_LOCKED_0 ||
|
||||
!dp_is_interlane_aligned(dpcd_lane_status_updated)) {
|
||||
/* if one of the channel equalization, clock
|
||||
* recovery or symbol lock is dropped
|
||||
* consider it as (link has been
|
||||
|
@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
|
||||
uint32_t retries_eq = 0;
|
||||
enum dc_status status;
|
||||
enum dc_dp_training_pattern tr_pattern;
|
||||
uint32_t wait_time_microsec;
|
||||
uint32_t wait_time_microsec = 0;
|
||||
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
|
||||
union lane_align_status_updated dpcd_lane_status_updated = {0};
|
||||
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
|
||||
|
@ -780,7 +780,7 @@ static const struct dc_debug_options debug_defaults_drv = {
|
||||
.disable_z10 = false,
|
||||
.ignore_pg = true,
|
||||
.psp_disabled_wa = true,
|
||||
.ips2_eval_delay_us = 1650,
|
||||
.ips2_eval_delay_us = 2000,
|
||||
.ips2_entry_delay_us = 800,
|
||||
.static_screen_wait_frames = 2,
|
||||
};
|
||||
|
@ -539,6 +539,12 @@ static int __alloc_range(struct drm_buddy *mm,
|
||||
} while (1);
|
||||
|
||||
list_splice_tail(&allocated, blocks);
|
||||
|
||||
if (total_allocated < size) {
|
||||
err = -ENOSPC;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_undo:
|
||||
|
@ -904,6 +904,7 @@ out:
|
||||
connector_set = NULL;
|
||||
fb = NULL;
|
||||
mode = NULL;
|
||||
num_connectors = 0;
|
||||
|
||||
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
|
||||
|
||||
|
@ -820,7 +820,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
|
||||
if (max_segment == 0)
|
||||
max_segment = UINT_MAX;
|
||||
err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
|
||||
nr_pages << PAGE_SHIFT,
|
||||
(unsigned long)nr_pages << PAGE_SHIFT,
|
||||
max_segment, GFP_KERNEL);
|
||||
if (err) {
|
||||
kfree(sg);
|
||||
|
@ -2355,6 +2355,9 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
|
||||
limits->min_rate = intel_dp_common_rate(intel_dp, 0);
|
||||
limits->max_rate = intel_dp_max_link_rate(intel_dp);
|
||||
|
||||
/* FIXME 128b/132b SST support missing */
|
||||
limits->max_rate = min(limits->max_rate, 810000);
|
||||
|
||||
limits->min_lane_count = 1;
|
||||
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
|
||||
|
||||
|
@ -51,8 +51,8 @@
|
||||
#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
|
||||
#define _DSCA_PPS_0 0x6B200
|
||||
#define _DSCC_PPS_0 0x6BA00
|
||||
#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + (pps) * 4)
|
||||
#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + (pps) * 4)
|
||||
#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
|
||||
#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
|
||||
|
@@ -1287,7 +1287,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
    gpu->ubwc_config.highest_bank_bit = 15;

    if (adreno_is_a610(gpu)) {
        gpu->ubwc_config.highest_bank_bit = 14;
        gpu->ubwc_config.highest_bank_bit = 13;
        gpu->ubwc_config.min_acc_len = 1;
        gpu->ubwc_config.ubwc_mode = 1;
    }
@@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
    void *vaddr;

    vaddr = msm_gem_get_vaddr(obj);
    vaddr = msm_gem_get_vaddr_locked(obj);
    if (IS_ERR(vaddr))
        return PTR_ERR(vaddr);
    iosys_map_set_vaddr(map, vaddr);
@@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)

void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
    msm_gem_put_vaddr(obj);
    msm_gem_put_vaddr_locked(obj);
}

struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
@@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    struct msm_ringbuffer *ring = submit->ring;
    unsigned long flags;

    WARN_ON(!mutex_is_locked(&gpu->lock));

    pm_runtime_get_sync(&gpu->pdev->dev);

    mutex_lock(&gpu->lock);

    msm_gpu_hw_init(gpu);

    submit->seqno = submit->hw_fence->seqno;

    update_sw_cntrs(gpu);

    /*
@@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    gpu->funcs->submit(gpu, submit);
    gpu->cur_ctx_seqno = submit->queue->ctx->seqno;

    hangcheck_timer_reset(gpu);

    mutex_unlock(&gpu->lock);

    pm_runtime_put(&gpu->pdev->dev);
    hangcheck_timer_reset(gpu);
}

/*
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
    struct msm_mmu base;
    struct msm_mmu *parent;
    struct io_pgtable_ops *pgtbl_ops;
    const struct iommu_flush_ops *tlb;
    struct device *iommu_dev;
    unsigned long pgsize_bitmap;    /* Bitmap of page sizes in use */
    phys_addr_t ttbr;
    u32 asid;
@@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {

static void msm_iommu_tlb_flush_all(void *cookie)
{
    struct msm_iommu_pagetable *pagetable = cookie;
    struct adreno_smmu_priv *adreno_smmu;

    if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
        return;

    adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

    pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);

    pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
        size_t granule, void *cookie)
{
    struct msm_iommu_pagetable *pagetable = cookie;
    struct adreno_smmu_priv *adreno_smmu;

    if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
        return;

    adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

    pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);

    pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
{
}

static const struct iommu_flush_ops null_tlb_ops = {
static const struct iommu_flush_ops tlb_ops = {
    .tlb_flush_all = msm_iommu_tlb_flush_all,
    .tlb_flush_walk = msm_iommu_tlb_flush_walk,
    .tlb_add_page = msm_iommu_tlb_add_page,
@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)

    /* The incoming cfg will have the TTBR1 quirk enabled */
    ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
    ttbr0_cfg.tlb = &null_tlb_ops;
    ttbr0_cfg.tlb = &tlb_ops;

    pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
        &ttbr0_cfg, iommu->domain);
        &ttbr0_cfg, pagetable);

    if (!pagetable->pgtbl_ops) {
        kfree(pagetable);
@@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)

    /* Needed later for TLB flush */
    pagetable->parent = parent;
    pagetable->tlb = ttbr1_cfg->tlb;
    pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
    pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
    pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
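The msm change above ("Wire up tlb ops") replaces no-op TLB callbacks with real flushes, which means a flush can now arrive while the GPU SMMU is runtime-suspended. Both callbacks therefore bracket the hardware access with pm_runtime_get_if_in_use() and pm_runtime_put_autosuspend(), performing the flush only when the device is already awake — a suspended device has no live TLB state to maintain, so skipping is safe. A condensed sketch of that guard with a hypothetical helper, not code from the driver:

    #include <linux/pm_runtime.h>

    /* Run a hardware flush only if @dev is already powered. */
    static void guarded_flush(struct device *dev,
                              void (*flush)(void *), void *cookie)
    {
        /* Positive return means a usage count was taken on an
         * active device; anything else means stay hands-off. */
        if (pm_runtime_get_if_in_use(dev) <= 0)
            return;

        flush(cookie);

        pm_runtime_put_autosuspend(dev);
    }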
@@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)

    msm_fence_init(submit->hw_fence, fctx);

    submit->seqno = submit->hw_fence->seqno;

    mutex_lock(&priv->lru.lock);

    for (i = 0; i < submit->nr_bos; i++) {
@@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)

    mutex_unlock(&priv->lru.lock);

    /* TODO move submit path over to using a per-ring lock.. */
    mutex_lock(&gpu->lock);

    msm_gpu_submit(gpu, submit);

    mutex_unlock(&gpu->lock);

    return dma_fence_get(submit->hw_fence);
}
@@ -128,12 +128,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
    struct nouveau_abi16_ntfy *ntfy, *temp;

    /* Cancel all jobs from the entity's queue. */
    drm_sched_entity_fini(&chan->sched.entity);
    if (chan->sched)
        drm_sched_entity_fini(&chan->sched->entity);

    if (chan->chan)
        nouveau_channel_idle(chan->chan);

    nouveau_sched_fini(&chan->sched);
    if (chan->sched)
        nouveau_sched_destroy(&chan->sched);

    /* cleanup notifier state */
    list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
@@ -337,10 +339,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
    if (ret)
        goto done;

    ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq,
                             chan->chan->dma.ib_max);
    if (ret)
        goto done;
    /* If we're not using the VM_BIND uAPI, we don't need a scheduler.
     *
     * The client lock is already acquired by nouveau_abi16_get().
     */
    if (nouveau_cli_uvmm(cli)) {
        ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
                                   chan->chan->dma.ib_max);
        if (ret)
            goto done;
    }

    init->channel = chan->chan->chid;

@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
    struct nouveau_bo *ntfy;
    struct nouveau_vma *ntfy_vma;
    struct nvkm_mm heap;
    struct nouveau_sched sched;
    struct nouveau_sched *sched;
};

struct nouveau_abi16 {
@@ -201,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
    WARN_ON(!list_empty(&cli->worker));

    usif_client_fini(cli);
    nouveau_sched_fini(&cli->sched);
    if (cli->sched)
        nouveau_sched_destroy(&cli->sched);
    if (uvmm)
        nouveau_uvmm_fini(uvmm);
    nouveau_vmm_fini(&cli->svm);
@@ -311,7 +312,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
    cli->mem = &mems[ret];

    /* Don't pass in the (shared) sched_wq in order to let
     * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
     * nouveau_sched_create() create a dedicated one for VM_BIND jobs.
     *
     * This is required to ensure that for VM_BIND jobs free_job() work and
     * run_job() work can always run concurrently and hence, free_job() work
@@ -320,7 +321,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
     * locks which indirectly or directly are held for allocations
     * elsewhere.
     */
    ret = nouveau_sched_init(&cli->sched, drm, NULL, 1);
    ret = nouveau_sched_create(&cli->sched, drm, NULL, 1);
    if (ret)
        goto done;

@@ -98,7 +98,7 @@ struct nouveau_cli {
        bool disabled;
    } uvmm;

    struct nouveau_sched sched;
    struct nouveau_sched *sched;

    const struct nvif_mclass *mem;

@@ -389,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
    if (ret)
        goto out;

    args.sched = &chan16->sched;
    args.sched = chan16->sched;
    args.file_priv = file_priv;
    args.chan = chan;

@@ -398,7 +398,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = {
    .free_job = nouveau_sched_free_job,
};

int
static int
nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
                   struct workqueue_struct *wq, u32 credit_limit)
{
@@ -453,7 +453,30 @@ fail_wq:
    return ret;
}

void
int
nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
                     struct workqueue_struct *wq, u32 credit_limit)
{
    struct nouveau_sched *sched;
    int ret;

    sched = kzalloc(sizeof(*sched), GFP_KERNEL);
    if (!sched)
        return -ENOMEM;

    ret = nouveau_sched_init(sched, drm, wq, credit_limit);
    if (ret) {
        kfree(sched);
        return ret;
    }

    *psched = sched;

    return 0;
}

static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
    struct drm_gpu_scheduler *drm_sched = &sched->base;
@@ -471,3 +494,14 @@ nouveau_sched_fini(struct nouveau_sched *sched)
    if (sched->wq)
        destroy_workqueue(sched->wq);
}

void
nouveau_sched_destroy(struct nouveau_sched **psched)
{
    struct nouveau_sched *sched = *psched;

    nouveau_sched_fini(sched);
    kfree(sched);

    *psched = NULL;
}
@@ -111,8 +111,8 @@ struct nouveau_sched {
    } job;
};

int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
                       struct workqueue_struct *wq, u32 credit_limit);
void nouveau_sched_fini(struct nouveau_sched *sched);
int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
                         struct workqueue_struct *wq, u32 credit_limit);
void nouveau_sched_destroy(struct nouveau_sched **psched);

#endif
@@ -1011,7 +1011,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
    if (ret)
        return ret;

    buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
    buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
    if (!buffer->fault)
        return -ENOMEM;
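This is the "kvcalloc argument order" fix from the changelog: kvcalloc() follows the calloc() convention of element count first, element size second. The swapped form happens to compute the same product, so it allocated the right amount of memory, but it defeats the intent of the API and confuses static checkers. A hedged sketch of the convention, with a hypothetical record type and helper:

    #include <linux/slab.h>     /* kvcalloc(), kvfree() */
    #include <linux/types.h>

    struct fault_rec {
        u64 addr;
        u32 flags;
    };

    /* Allocate @entries zeroed records: count first, element size second. */
    static struct fault_rec *fault_table_alloc(size_t entries)
    {
        return kvcalloc(entries, sizeof(struct fault_rec), GFP_KERNEL);
    }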
@@ -1740,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
    if (ret)
        return ret;

    args.sched = &cli->sched;
    args.sched = cli->sched;
    args.file_priv = file_priv;

    ret = nouveau_uvmm_vm_bind(&args);
@@ -1985,8 +1985,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
        clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
    }

    if (!clock)
    if (!clock) {
        vop2_unlock(vop2);
        return;
    }

    if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
        !(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT))
@@ -8,6 +8,7 @@

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

@@ -18,6 +19,93 @@ static inline u64 get_size(int order, u64 chunk_size)
    return (1 << order) * chunk_size;
}

static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
    u64 mm_size, ps = SZ_4K, i, n_pages, total;
    struct drm_buddy_block *block;
    struct drm_buddy mm;
    LIST_HEAD(left);
    LIST_HEAD(middle);
    LIST_HEAD(right);
    LIST_HEAD(allocated);

    mm_size = 16 * 3 * SZ_4K;

    KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

    /*
     * Idea is to fragment the address space by alternating block
     * allocations between three different lists; one for left, middle and
     * right. We can then free a list to simulate fragmentation. In
     * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
     * including the try_harder path.
     */

    i = 0;
    n_pages = mm_size / ps;
    do {
        struct list_head *list;
        int slot = i % 3;

        if (slot == 0)
            list = &left;
        else if (slot == 1)
            list = &middle;
        else
            list = &right;
        KUNIT_ASSERT_FALSE_MSG(test,
                               drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                      ps, ps, list, 0),
                               "buddy_alloc hit an error size=%d\n",
                               ps);
    } while (++i < n_pages);

    KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                       3 * ps, ps, &allocated,
                                                       DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                          "buddy_alloc didn't error size=%d\n", 3 * ps);

    drm_buddy_free_list(&mm, &middle);
    KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                       3 * ps, ps, &allocated,
                                                       DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                          "buddy_alloc didn't error size=%llu\n", 3 * ps);
    KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                       2 * ps, ps, &allocated,
                                                       DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                          "buddy_alloc didn't error size=%llu\n", 2 * ps);

    drm_buddy_free_list(&mm, &right);
    KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                       3 * ps, ps, &allocated,
                                                       DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                          "buddy_alloc didn't error size=%llu\n", 3 * ps);
    /*
     * At this point we should have enough contiguous space for 2 blocks,
     * however they are never buddies (since we freed middle and right) so
     * will require the try_harder logic to find them.
     */
    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                        2 * ps, ps, &allocated,
                                                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                           "buddy_alloc hit an error size=%d\n", 2 * ps);

    drm_buddy_free_list(&mm, &left);
    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                        3 * ps, ps, &allocated,
                                                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                           "buddy_alloc hit an error size=%d\n", 3 * ps);

    total = 0;
    list_for_each_entry(block, &allocated, link)
        total += drm_buddy_block_size(&mm, block);

    KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

    drm_buddy_free_list(&mm, &allocated);
    drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
    u64 mm_size, size, start = 0;
@@ -280,6 +368,7 @@ static struct kunit_case drm_buddy_tests[] = {
    KUNIT_CASE(drm_test_buddy_alloc_optimistic),
    KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
    KUNIT_CASE(drm_test_buddy_alloc_pathological),
    KUNIT_CASE(drm_test_buddy_alloc_contiguous),
    {}
};
@@ -10,7 +10,7 @@

#include "xe_bo.h"

#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT)
#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */

static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
{
@@ -20,8 +20,8 @@

struct xe_pt_dir {
    struct xe_pt pt;
    /** @dir: Directory structure for the xe_pt_walk functionality */
    struct xe_ptw_dir dir;
    /** @children: Array of page-table child nodes */
    struct xe_ptw *children[XE_PDES];
};

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -44,7 +44,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)

static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{
    return container_of(pt_dir->dir.entries[index], struct xe_pt, base);
    return container_of(pt_dir->children[index], struct xe_pt, base);
}

static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -65,6 +65,14 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
        XE_PTE_NULL;
}

static void xe_pt_free(struct xe_pt *pt)
{
    if (pt->level)
        kfree(as_xe_pt_dir(pt));
    else
        kfree(pt);
}

/**
 * xe_pt_create() - Create a page-table.
 * @vm: The vm to create for.
@@ -85,15 +93,19 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
    struct xe_pt *pt;
    struct xe_bo *bo;
    size_t size;
    int err;

    size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) +
        XE_PDES * sizeof(struct xe_ptw *);
    pt = kzalloc(size, GFP_KERNEL);
    if (level) {
        struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);

        pt = (dir) ? &dir->pt : NULL;
    } else {
        pt = kzalloc(sizeof(*pt), GFP_KERNEL);
    }
    if (!pt)
        return ERR_PTR(-ENOMEM);

    pt->level = level;
    bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
                              ttm_bo_type_kernel,
                              XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -106,8 +118,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
        goto err_kfree;
    }
    pt->bo = bo;
    pt->level = level;
    pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
    pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;

    if (vm->xef)
        xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -116,7 +127,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
    return pt;

err_kfree:
    kfree(pt);
    xe_pt_free(pt);
    return ERR_PTR(err);
}

@@ -193,7 +204,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
                deferred);
        }
    }
    kfree(pt);
    xe_pt_free(pt);
}

/**
@@ -358,7 +369,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
        struct iosys_map *map = &parent->bo->vmap;

        if (unlikely(xe_child))
            parent->base.dir->entries[offset] = &xe_child->base;
            parent->base.children[offset] = &xe_child->base;

        xe_pt_write(xe_walk->vm->xe, map, offset, pte);
        parent->num_live++;
@@ -853,7 +864,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
                xe_pt_destroy(xe_pt_entry(pt_dir, j_),
                              xe_vma_vm(vma)->flags, deferred);

            pt_dir->dir.entries[j_] = &newpte->base;
            pt_dir->children[j_] = &newpte->base;
        }
        kfree(entries[i].pt_entries);
    }
@@ -1507,7 +1518,7 @@ xe_pt_commit_unbind(struct xe_vma *vma,
                    xe_pt_destroy(xe_pt_entry(pt_dir, i),
                                  xe_vma_vm(vma)->flags, deferred);

                pt_dir->dir.entries[i] = NULL;
                pt_dir->children[i] = NULL;
            }
        }
    }
@@ -74,7 +74,7 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
                     u64 addr, u64 end, struct xe_pt_walk *walk)
{
    pgoff_t offset = xe_pt_offset(addr, level, walk);
    struct xe_ptw **entries = parent->dir ? parent->dir->entries : NULL;
    struct xe_ptw **entries = parent->children ? parent->children : NULL;
    const struct xe_pt_walk_ops *ops = walk->ops;
    enum page_walk_action action;
    struct xe_ptw *child;

@@ -8,28 +8,15 @@
#include <linux/pagewalk.h>
#include <linux/types.h>

struct xe_ptw_dir;

/**
 * struct xe_ptw - base class for driver pagetable subclassing.
 * @dir: Pointer to an array of children if any.
 * @children: Pointer to an array of children if any.
 *
 * Drivers could subclass this, and if it's a page-directory, typically
 * embed the xe_ptw_dir::entries array in the same allocation.
 * embed an array of xe_ptw pointers.
 */
struct xe_ptw {
    struct xe_ptw_dir *dir;
};

/**
 * struct xe_ptw_dir - page directory structure
 * @entries: Array holding page directory children.
 *
 * It is the responsibility of the user to ensure @entries is
 * correctly sized.
 */
struct xe_ptw_dir {
    struct xe_ptw *entries[0];
    struct xe_ptw **children;
};

/**
@@ -151,6 +151,11 @@ xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last)
    return xe_range_fence_tree_iter_next(rfence, start, last);
}

static void xe_range_fence_free(struct xe_range_fence *rfence)
{
    kfree(rfence);
}

const struct xe_range_fence_ops xe_range_fence_kfree_ops = {
    .free = (void (*)(struct xe_range_fence *rfence)) kfree,
    .free = xe_range_fence_free,
};
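The xe_range_fence change above is the clang-16 warning fix mentioned in the changelog: assigning kfree() to an ops slot through a function-pointer cast silences the compiler's type checking, and calling through a mismatched pointer type is undefined behavior that newer clang (the -Wcast-function-type-strict warning added around clang 16, if memory serves) and kernel CFI both object to. The cure is a trivial wrapper with the exact signature the ops table expects; a sketch with hypothetical names:

    #include <linux/slab.h>

    struct item {
        int payload;
    };

    struct item_ops {
        void (*free)(struct item *it);
    };

    /* Typed wrapper: matches the ops signature exactly, so no cast is
     * needed and indirect-call type checks stay happy. */
    static void item_free(struct item *it)
    {
        kfree(it);
    }

    static const struct item_ops item_kfree_ops = {
        .free = item_free,
        /* .free = (void (*)(struct item *))kfree would compile with the
         * cast, but calling through it is undefined behavior. */
    };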
@@ -995,9 +995,16 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
    int err;

    XE_WARN_ON(!vm);
    err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
    if (!err && bo && !bo->vm)
        err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
    if (num_shared)
        err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
    else
        err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
    if (!err && bo && !bo->vm) {
        if (num_shared)
            err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
        else
            err = drm_exec_lock_obj(exec, &bo->ttm.base);
    }

    return err;
}