drm fixes for 6.8-rc7


Merge tag 'drm-fixes-2024-03-01' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Bunch of fixes, xe, amdgpu, nouveau and tegra all have a few. Then
  drm/bridge including some drivers/soc fallout fixes. The biggest thing
  in here is a new unit test for some buddy allocator fixes, otherwise a
  misc fbcon, ttm unit test and one msm revert.

  Seems pretty normal for this stage.

  buddy:
   - two allocation fixes + unit test

  fbcon:
   - font restore syzkaller fix

  ttm:
   - kunit test fix

  bridge:
   - fix aux-hpd leaks
   - fix aux-hpd registration
   - fix use after free in soc/qcom
   - fix boot on soc/qcom

  xe:
   - A couple of tracepoint updates from Priyanka and Lucas
   - Make sure BINDs are completed before accepting UNBINDs on LR vms
   - Don't arbitrarily restrict max number of batched binds
   - Add uapi for dumpable bos (agreed on IRC)
   - Remove unused uapi flags and a leftover comment
   - A couple of fixes related to the execlist backend

  msm:
   - DP: Revert a change which was causing an HPD regression

  amdgpu:
   - Fix potential buffer overflow
   - Fix power min cap
   - Suspend/resume fix
   - SI PM fix
   - eDP fix

  nouveau:
   - fix a misreported VRAM sizing
   - fix a regression in suspend/resume due to freeing

  tegra:
   - host1x reset fix
   - only remove existing driver if display is possible"

* tag 'drm-fixes-2024-03-01' of https://gitlab.freedesktop.org/drm/kernel: (32 commits)
  drm/nouveau: keep DMA buffers required for suspend/resume
  nouveau: report byte usage in VRAM usage
  drm/xe/xe_trace: Add move_lacks_source detail to xe_bo_move trace
  drm/xe: Deny unbinds if uapi ufence pending
  drm/xe: Expose user fence from xe_sync_entry
  drm/xe: Use pointers in trace events
  drm/xe/xe_bo_move: Enhance xe_bo_move trace
  drm/xe/mmio: fix build warning for BAR resize on 32-bit
  drm/xe: get rid of MAX_BINDS
  drm/xe: Use vmalloc for array of bind allocation in bind IOCTL
  drm/xe: Don't support execlists in xe_gt_tlb_invalidation layer
  drm/xe: Fix execlist splat
  drm/xe/uapi: Remove unused flags
  drm/xe/uapi: Remove DRM_XE_VM_BIND_FLAG_ASYNC comment left over
  drm/xe: Add uapi for dumpable bos
  drm/amd/display: Add monitor patch for specific eDP
  Revert "drm/msm/dp: use drm_bridge_hpd_notify() to report HPD status changes"
  drm/tests/drm_buddy: add alloc_range_bias test
  drm/buddy: check range allocation matches alignment
  drm/buddy: fix range bias
  ...
Linus Torvalds 2024-03-01 11:23:08 -08:00
commit 7187ea0978
39 changed files with 659 additions and 293 deletions


@@ -199,7 +199,7 @@ config DRM_TTM
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
depends on DRM && KUNIT && MMU
depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
@@ -207,7 +207,8 @@ config DRM_TTM_KUNIT_TEST
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
developers.
developers. It depends on (UML || COMPILE_TEST) since no other driver
which uses TTM can be loaded while running the tests.
If in doubt, say "N".


@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
return AMD_RESET_METHOD_MODE1;
}
static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{
u32 sol_reg;
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
/* Will reset for the following suspend abort cases.
* 1) Only reset limit on APU side, dGPU hasn't checked yet.
* 2) S3 suspend abort and TOS already launched.
*/
if (adev->flags & AMD_IS_APU && adev->in_s3 &&
!adev->suspend_complete &&
sol_reg)
return true;
return false;
}
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
(adev->apu_flags & AMD_APU_IS_RAVEN2))
/* On the latest Raven, the GPU reset can be performed
* successfully. So now, temporarily enable it for the
* S3 suspend abort case.
*/
if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
(adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
!soc15_need_reset_on_resume(adev))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -1298,24 +1321,6 @@ static int soc15_common_suspend(void *handle)
return soc15_common_hw_fini(adev);
}
static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{
u32 sol_reg;
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
/* Will reset for the following suspend abort cases.
* 1) Only reset limit on APU side, dGPU hasn't checked yet.
* 2) S3 suspend abort and TOS already launched.
*/
if (adev->flags & AMD_IS_APU && adev->in_s3 &&
!adev->suspend_complete &&
sol_reg)
return true;
return false;
}
static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;


@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
apply_edid_quirks(edid_buf, edid_caps);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
return result;
@@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
apply_edid_quirks(edid_buf, edid_caps);
kfree(sads);
kfree(sadb);


@@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
}
for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
__func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
break;
}
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];


@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
static int si_set_temperature_range(struct amdgpu_device *adev)
{
int ret;
ret = si_thermal_enable_alert(adev, false);
if (ret)
return ret;
ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_enable_alert(adev, true);
if (ret)
return ret;
return ret;
}
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->pm.dpm_enabled)
return 0;
ret = si_set_temperature_range(adev);
if (ret)
return ret;
#if 0 //TODO ?
si_dpm_powergate_uvd(adev, true);
#endif
return 0;
}


@@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
if (smu->od_enabled) {
if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
} else {
else
od_percent_upper = 0;
od_percent_lower = 100;
}
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);


@@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (smu->od_enabled &&
navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
} else {
else
od_percent_upper = 0;
od_percent_lower = 100;
}
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);


@@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
if (smu->od_enabled) {
if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
} else {
else
od_percent_upper = 0;
od_percent_lower = 100;
}
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);


@@ -2369,13 +2369,12 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
if (smu->od_enabled) {
if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
} else {
else
od_percent_upper = 0;
od_percent_lower = 100;
}
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);


@@ -2333,13 +2333,12 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
if (smu->od_enabled) {
if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
} else {
else
od_percent_upper = 0;
od_percent_lower = 100;
}
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);


@@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev)
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
of_node_put(adev->dev.platform_data);
of_node_put(adev->dev.of_node);
kfree(adev);
}
static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
static void drm_aux_hpd_bridge_free_adev(void *_adev)
{
struct auxiliary_device *adev = _adev;
auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
auxiliary_device_uninit(_adev);
}
/**
* drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
* devm_drm_dp_hpd_bridge_alloc - allocate a HPD DisplayPort bridge
* @parent: device instance providing this bridge
* @np: device node pointer corresponding to this bridge instance
*
@@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
* DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
* able to send the HPD events.
*
* Return: device instance that will handle created bridge or an error code
* encoded into the pointer.
* Return: bridge auxiliary device pointer or an error pointer
*/
struct device *drm_dp_hpd_bridge_register(struct device *parent,
struct device_node *np)
struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
{
struct auxiliary_device *adev;
int ret;
@@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
ret = auxiliary_device_init(adev);
if (ret) {
of_node_put(adev->dev.platform_data);
of_node_put(adev->dev.of_node);
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
kfree(adev);
return ERR_PTR(ret);
}
ret = auxiliary_device_add(adev);
if (ret) {
auxiliary_device_uninit(adev);
ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
if (ret)
return ERR_PTR(ret);
}
ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
return adev;
}
EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
static void drm_aux_hpd_bridge_del_adev(void *_adev)
{
auxiliary_device_delete(_adev);
}
/**
* devm_drm_dp_hpd_bridge_add - register an HPD DisplayPort bridge
* @dev: struct device to tie registration lifetime to
* @adev: bridge auxiliary device to be registered
*
* Returns: zero on success or a negative errno
*/
int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
{
int ret;
ret = auxiliary_device_add(adev);
if (ret)
return ret;
return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
}
EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
/**
* drm_dp_hpd_bridge_register - allocate and register an HPD DisplayPort bridge
* @parent: device instance providing this bridge
* @np: device node pointer corresponding to this bridge instance
*
* Return: device instance that will handle created bridge or an error pointer
*/
struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
{
struct auxiliary_device *adev;
int ret;
adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
if (IS_ERR(adev))
return ERR_CAST(adev);
ret = devm_drm_dp_hpd_bridge_add(parent, adev);
if (ret)
return ERR_PTR(ret);
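
Taken together, the aux-hpd changes split the old one-shot registration into
an allocate step and an add step. A minimal probe-path sketch of the intended
usage (foo_probe() is hypothetical; only the devm_drm_dp_hpd_bridge_alloc(),
devm_drm_dp_hpd_bridge_add() and drm_aux_hpd_bridge_notify() calls come from
the code above):

#include <linux/err.h>
#include <drm/bridge/aux-bridge.h>

static int foo_probe(struct device *dev, struct device_node *np)
{
        struct auxiliary_device *adev;
        int ret;

        /* Step 1: allocate early, while unwinding is still trivial. */
        adev = devm_drm_dp_hpd_bridge_alloc(dev, np);
        if (IS_ERR(adev))
                return PTR_ERR(adev);

        /* ... initialise whatever state HPD notifications will touch ... */

        /* Step 2: register only once callbacks can safely arrive. */
        ret = devm_drm_dp_hpd_bridge_add(dev, adev);
        if (ret)
                return ret;

        /* Report the initial connector state. */
        drm_aux_hpd_bridge_notify(&adev->dev, connector_status_disconnected);
        return 0;
}

This is the pattern the pmic_glink_altmode changes below adopt: allocate all
port bridges first, then add them once every port is set up, closing the
use-after-free window that the one-step drm_dp_hpd_bridge_register() left
open.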


@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
if (block_start < start || block_end > end) {
u64 adjusted_start = max(block_start, start);
u64 adjusted_end = min(block_end, end);
if (round_down(adjusted_end + 1, req_size) <=
round_up(adjusted_start, req_size))
continue;
}
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
@@ -761,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return -EINVAL;
/* Actual range allocation */
if (start + size == end)
if (start + size == end) {
if (!IS_ALIGNED(start | end, min_block_size))
return -EINVAL;
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
}
original_size = size;
original_min_size = min_block_size;
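
The new bias check above is easiest to see with concrete numbers. A
standalone sketch of the test in alloc_range_bias() (userspace stand-ins for
the kernel's power-of-two round_up()/round_down(); the values are invented
for illustration):

#include <assert.h>
#include <stdint.h>

#define round_up(x, y)   (((x) + (y) - 1) & ~((uint64_t)(y) - 1))
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
        uint64_t req_size = 0x400;       /* mm->chunk_size << order */
        uint64_t adjusted_start = 0x500; /* max(block_start, start) */
        uint64_t adjusted_end = 0x7ff;   /* min(block_end, end), inclusive */

        /* The first naturally aligned chunk would start at 0x800, but the
         * last chunk that still fits must also end by 0x800, so nothing
         * fits and the block is skipped - the "continue" above. */
        assert(round_down(adjusted_end + 1, req_size) <=
               round_up(adjusted_start, req_size));
        return 0;
}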


@@ -329,10 +329,26 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
static void dp_display_send_hpd_event(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
struct drm_connector *connector;
dp = container_of(dp_display, struct dp_display_private, dp_display);
connector = dp->dp_display.connector;
drm_helper_hpd_irq_event(connector->dev);
}
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
struct drm_bridge *bridge = dp->dp_display.bridge;
if ((hpd && dp->dp_display.link_ready) ||
(!hpd && !dp->dp_display.link_ready)) {
drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
(hpd ? "on" : "off"));
return 0;
}
/* reset video pattern flag on disconnect */
if (!hpd) {
@@ -348,7 +364,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
dp_display_send_hpd_event(&dp->dp_display);
return 0;
}


@@ -269,7 +269,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
case NOUVEAU_GETPARAM_VRAM_USED: {
struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
getparam->value = (u64)ttm_resource_manager_usage(vram_mgr) << PAGE_SHIFT;
getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
break;
}
default:
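
For userspace the visible effect is that NOUVEAU_GETPARAM_VRAM_USED now
reports bytes instead of a page-shifted value. A hypothetical query helper
(error handling trimmed; the ioctl and the struct are the existing nouveau
uapi):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

static int query_vram_used(int drm_fd, uint64_t *bytes)
{
        struct drm_nouveau_getparam gp = {
                .param = NOUVEAU_GETPARAM_VRAM_USED,
        };

        if (ioctl(drm_fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp))
                return -1;

        *bytes = gp.value; /* byte count, no longer shifted by PAGE_SHIFT */
        return 0;
}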


@@ -1054,8 +1054,6 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
/* Release the DMA buffers that were needed only for boot and init */
nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
nvkm_gsp_mem_dtor(gsp, &gsp->libos);
nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
return ret;
}
@@ -2163,6 +2161,8 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
r535_gsp_dtor_fws(gsp);
nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
nvkm_gsp_mem_dtor(gsp, &gsp->logintr);


@@ -1243,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;
/*
* Only take over from a potential firmware framebuffer if any CRTCs
* have been registered. This must not be a fatal error because there
* are other accelerators that are exposed via this driver.
*
* Another case where this happens is on Tegra234 where the display
* hardware is no longer part of the host1x complex, so this driver
* will not expose any modesetting features.
*/
if (drm->mode_config.num_crtc > 0) {
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;
} else {
/*
* Indicate to userspace that this doesn't expose any display
* capabilities.
*/
drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
err = drm_dev_register(drm, 0);
if (err < 0)


@@ -14,11 +14,216 @@
#include "../lib/drm_random.h"
static unsigned int random_seed;
static inline u64 get_size(int order, u64 chunk_size)
{
return (1 << order) * chunk_size;
}
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
DRM_RND_STATE(prng, random_seed);
unsigned int i, count, *order;
struct drm_buddy mm;
LIST_HEAD(allocated);
bias_size = SZ_1M;
ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
ps = max(SZ_4K, ps);
mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
"buddy_init failed\n");
count = mm_size / bias_size;
order = drm_random_order(count, &prng);
KUNIT_EXPECT_TRUE(test, order);
/*
* Idea is to split the address space into uniform bias ranges, and then
* in some random order allocate within each bias, using various
* patterns within. This should detect if allocations leak out from a
* given bias, for example.
*/
for (i = 0; i < count; i++) {
LIST_HEAD(tmp);
u32 size;
bias_start = order[i] * bias_size;
bias_end = bias_start + bias_size;
bias_rem = bias_size;
/* internal round_up too big */
KUNIT_ASSERT_TRUE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, bias_size + ps, bias_size,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, bias_size, bias_size);
/* size too big */
KUNIT_ASSERT_TRUE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, bias_size + ps, ps,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, bias_size + ps, ps);
/* bias range too small for size */
KUNIT_ASSERT_TRUE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start + ps,
bias_end, bias_size, ps,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
bias_start + ps, bias_end, bias_size, ps);
/* bias misaligned */
KUNIT_ASSERT_TRUE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start + ps,
bias_end - ps,
bias_size >> 1, bias_size >> 1,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
/* single big page */
KUNIT_ASSERT_FALSE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, bias_size, bias_size,
&tmp,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, bias_size, bias_size);
drm_buddy_free_list(&mm, &tmp);
/* single page with internal round_up */
KUNIT_ASSERT_FALSE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, ps, bias_size,
&tmp,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, ps, bias_size);
drm_buddy_free_list(&mm, &tmp);
/* random size within */
size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
if (size)
KUNIT_ASSERT_FALSE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, size, ps,
&tmp,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, size, ps);
bias_rem -= size;
/* too big for current avail */
KUNIT_ASSERT_TRUE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, bias_rem + ps, ps,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, bias_rem + ps, ps);
if (bias_rem) {
/* random fill of the remainder */
size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
size = max(size, ps);
KUNIT_ASSERT_FALSE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, size, ps,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, size, ps);
/*
* Intentionally allow some space to be left
* unallocated, and ideally not always on the bias
* boundaries.
*/
drm_buddy_free_list(&mm, &tmp);
} else {
list_splice_tail(&tmp, &allocated);
}
}
kfree(order);
drm_buddy_free_list(&mm, &allocated);
drm_buddy_fini(&mm);
/*
* Something more free-form. Idea is to pick a random starting bias
* range within the address space and then start filling it up. Also
* randomly grow the bias range in both directions as we go along. This
* should give us bias start/end which is not always uniform like above,
* and in some cases will require the allocator to jump over already
* allocated nodes in the middle of the address space.
*/
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
"buddy_init failed\n");
bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
bias_end = max(bias_end, bias_start + ps);
bias_rem = bias_end - bias_start;
do {
u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
KUNIT_ASSERT_FALSE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start,
bias_end, size, ps,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, size, ps);
bias_rem -= size;
/*
* Try to randomly grow the bias range in both directions, or
* only one, or perhaps don't grow at all.
*/
do {
u32 old_bias_start = bias_start;
u32 old_bias_end = bias_end;
if (bias_start)
bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
if (bias_end != mm_size)
bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
bias_rem += old_bias_start - bias_start;
bias_rem += bias_end - old_bias_end;
} while (!bias_rem && (bias_start || bias_end != mm_size));
} while (bias_rem);
KUNIT_ASSERT_EQ(test, bias_start, 0);
KUNIT_ASSERT_EQ(test, bias_end, mm_size);
KUNIT_ASSERT_TRUE_MSG(test,
drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
ps, ps,
&allocated,
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc passed with bias(%x-%x), size=%u\n",
bias_start, bias_end, ps);
drm_buddy_free_list(&mm, &allocated);
drm_buddy_fini(&mm);
}
static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
@@ -362,17 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_fini(&mm);
}
static int drm_buddy_suite_init(struct kunit_suite *suite)
{
while (!random_seed)
random_seed = get_random_u32();
kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
random_seed);
return 0;
}
static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_limit),
KUNIT_CASE(drm_test_buddy_alloc_optimistic),
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
KUNIT_CASE(drm_test_buddy_alloc_contiguous),
KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
static struct kunit_suite drm_buddy_test_suite = {
.name = "drm_buddy",
.suite_init = drm_buddy_suite_init,
.test_cases = drm_buddy_tests,
};


@@ -28,6 +28,14 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
[XE_PL_SYSTEM] = "system",
[XE_PL_TT] = "gtt",
[XE_PL_VRAM0] = "vram0",
[XE_PL_VRAM1] = "vram1",
[XE_PL_STOLEN] = "stolen"
};
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
@@ -713,8 +721,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
migrate = xe->tiles[0].migrate;
xe_assert(xe, migrate);
trace_xe_bo_move(bo);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
xe_device_mem_access_get(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {


@@ -243,6 +243,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
extern struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);


@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
[XE_PL_SYSTEM] = "system",
[XE_PL_TT] = "gtt",
[XE_PL_VRAM0] = "vram0",
[XE_PL_VRAM1] = "vram1",
[4 ... 6] = NULL,
[XE_PL_STOLEN] = "stolen"
};
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
struct xe_file *xef = file->driver_priv;
struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
spin_unlock(&client->bos_lock);
for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
if (!mem_type_to_name[mem_type])
if (!xe_mem_type_to_name[mem_type])
continue;
man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
mem_type_to_name[mem_type]);
xe_mem_type_to_name[mem_type]);
}
}
}


@@ -309,85 +309,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
return q->ops->set_timeslice(q, value);
}
static int exec_queue_set_preemption_timeout(struct xe_device *xe,
struct xe_exec_queue *q, u64 value,
bool create)
{
u32 min = 0, max = 0;
xe_exec_queue_get_prop_minmax(q->hwe->eclass,
XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
if (xe_exec_queue_enforce_schedule_limit() &&
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
return q->ops->set_preempt_timeout(q, value);
}
static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
u32 min = 0, max = 0;
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
xe_exec_queue_get_prop_minmax(q->hwe->eclass,
XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
if (xe_exec_queue_enforce_schedule_limit() &&
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
return q->ops->set_job_timeout(q, value);
}
static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
q->usm.acc_trigger = value;
return 0;
}
static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
q->usm.acc_notify = value;
return 0;
}
static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
if (value > DRM_XE_ACC_GRANULARITY_64M)
return -EINVAL;
q->usm.acc_granularity = value;
return 0;
}
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value, bool create);
@@ -395,11 +316,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -418,7 +334,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, ext.property >=
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
XE_IOCTL_DBG(xe, ext.pad))
XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
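
The tightened check keeps the usual two-step treatment of a user-supplied
index: validate it, then clamp it against speculation before using it to
index the table. A condensed sketch of that shape (set_property_checked() is
a hypothetical wrapper; the table and array_index_nospec() are the ones
above):

#include <linux/nospec.h>

static int set_property_checked(struct xe_device *xe, struct xe_exec_queue *q,
                                u32 property, u64 value, bool create)
{
        /* Reject anything outside the table; the ioctl additionally limits
         * this to the PRIORITY and TIMESLICE properties. */
        if (property >= ARRAY_SIZE(exec_queue_set_property_funcs))
                return -EINVAL;

        /* Clamp the index so a mispredicted branch cannot speculatively
         * read past the end of the function table. */
        property = array_index_nospec(property,
                                      ARRAY_SIZE(exec_queue_set_property_funcs));

        return exec_queue_set_property_funcs[property](xe, q, value, create);
}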


@@ -150,16 +150,6 @@ struct xe_exec_queue {
spinlock_t lock;
} compute;
/** @usm: unified shared memory state */
struct {
/** @acc_trigger: access counter trigger */
u32 acc_trigger;
/** @acc_notify: access counter notify */
u32 acc_notify;
/** @acc_granularity: access counter granularity */
u32 acc_granularity;
} usm;
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;


@@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
{
struct xe_execlist_port *port = exl->port;
enum xe_exec_queue_priority priority = exl->active_priority;
enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
XE_WARN_ON(priority < 0);


@@ -247,6 +247,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
xe_gt_assert(gt, vma);
/* Execlists not supported */
if (gt_to_xe(gt)->info.force_execlist) {
if (fence)
__invalidation_fence_signal(fence);
return 0;
}
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
if (!xe->info.has_range_tlb_invalidation) {
@@ -317,6 +325,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
struct drm_printer p = drm_err_printer(__func__);
int ret;
/* Execlists not supported */
if (gt_to_xe(gt)->info.force_execlist)
return 0;
/*
* XXX: See above, this algorithm only works if seqno are always in
* order


@@ -682,8 +682,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
#define ACC_GRANULARITY_S 20
#define ACC_NOTIFY_S 16
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -754,13 +752,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
(q->usm.acc_granularity <<
ACC_GRANULARITY_S) | vm->usm.asid);
if (xe->info.has_usm && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
(q->usm.acc_notify << ACC_NOTIFY_S) |
q->usm.acc_trigger);
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;


@@ -105,7 +105,7 @@ static void xe_resize_vram_bar(struct xe_device *xe)
pci_bus_for_each_resource(root, root_res, i) {
if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
root_res->start > 0x100000000ull)
(u64)root_res->start > 0x100000000ul)
break;
}
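
The cast addresses a 32-bit build warning: there, resource_size_t can be a
32-bit type, so comparing it against 0x100000000 can never be true and the
compiler says so. A tiny userspace reproduction of the idea (the typedef is
an assumption standing in for such a configuration):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t resource_size_t; /* assumed 32-bit kernel configuration */

int main(void)
{
        resource_size_t start = 0x80000000u;

        /* For a 32-bit type the comparison below is always false, which is
         * what the warning complains about; casting makes the widening
         * explicit and keeps the intent clear on both widths. */
        if ((uint64_t)start > 0x100000000ull)
                printf("resource starts above 4GiB\n");
        else
                printf("resource starts below 4GiB\n");

        return 0;
}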


@@ -19,7 +19,7 @@
#include "xe_macros.h"
#include "xe_sched_job_types.h"
struct user_fence {
struct xe_user_fence {
struct xe_device *xe;
struct kref refcount;
struct dma_fence_cb cb;
@@ -27,31 +27,32 @@ struct user_fence {
struct mm_struct *mm;
u64 __user *addr;
u64 value;
int signalled;
};
static void user_fence_destroy(struct kref *kref)
{
struct user_fence *ufence = container_of(kref, struct user_fence,
struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
refcount);
mmdrop(ufence->mm);
kfree(ufence);
}
static void user_fence_get(struct user_fence *ufence)
static void user_fence_get(struct xe_user_fence *ufence)
{
kref_get(&ufence->refcount);
}
static void user_fence_put(struct user_fence *ufence)
static void user_fence_put(struct xe_user_fence *ufence)
{
kref_put(&ufence->refcount, user_fence_destroy);
}
static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
u64 value)
static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
u64 value)
{
struct user_fence *ufence;
struct xe_user_fence *ufence;
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
@@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
static void user_fence_worker(struct work_struct *w)
{
struct user_fence *ufence = container_of(w, struct user_fence, worker);
struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
@@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w)
}
wake_up_all(&ufence->xe->ufence_wq);
WRITE_ONCE(ufence->signalled, 1);
user_fence_put(ufence);
}
static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
INIT_WORK(&ufence->worker, user_fence_worker);
queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct user_fence *ufence = container_of(cb, struct user_fence, cb);
struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
kick_ufence(ufence, fence);
}
@@ -340,3 +342,39 @@ err_out:
return ERR_PTR(-ENOMEM);
}
/**
* xe_sync_ufence_get() - Get user fence from sync
* @sync: input sync
*
* Get a user fence reference from sync.
*
* Return: xe_user_fence pointer with reference
*/
struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
{
user_fence_get(sync->ufence);
return sync->ufence;
}
/**
* xe_sync_ufence_put() - Put user fence reference
* @ufence: user fence reference
*
*/
void xe_sync_ufence_put(struct xe_user_fence *ufence)
{
user_fence_put(ufence);
}
/**
* xe_sync_ufence_get_status() - Get user fence status
* @ufence: user fence
*
* Return: 1 if signalled, 0 not signalled, <0 on error
*/
int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
{
return READ_ONCE(ufence->signalled);
}


@@ -38,4 +38,8 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
return !!sync->ufence;
}
struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
void xe_sync_ufence_put(struct xe_user_fence *ufence);
int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
#endif


@@ -18,7 +18,7 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
struct user_fence *ufence;
struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
u32 type;


@@ -12,6 +12,7 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
__field(u64, fence)
__field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
__entry->fence = (u64)fence;
__entry->fence = fence;
__entry->seqno = fence->seqno;
),
TP_printk("fence=0x%016llx, seqno=%d",
TP_printk("fence=%p, seqno=%d",
__entry->fence, __entry->seqno)
);
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_STRUCT__entry(
__field(size_t, size)
__field(u32, flags)
__field(u64, vm)
__field(struct xe_vm *, vm)
),
TP_fast_assign(
__entry->size = bo->size;
__entry->flags = bo->flags;
__entry->vm = (unsigned long)bo->vm;
__entry->vm = bo->vm;
),
TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
TP_printk("size=%zu, flags=0x%02x, vm=%p",
__entry->size, __entry->flags, __entry->vm)
);
@@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
DEFINE_EVENT(xe_bo, xe_bo_move,
TP_PROTO(struct xe_bo *bo),
TP_ARGS(bo)
TRACE_EVENT(xe_bo_move,
TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
bool move_lacks_source),
TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
TP_STRUCT__entry(
__field(struct xe_bo *, bo)
__field(size_t, size)
__field(u32, new_placement)
__field(u32, old_placement)
__array(char, device_id, 12)
__field(bool, move_lacks_source)
),
TP_fast_assign(
__entry->bo = bo;
__entry->size = bo->size;
__entry->new_placement = new_placement;
__entry->old_placement = old_placement;
strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
__entry->move_lacks_source = move_lacks_source;
),
TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
__entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
xe_mem_type_to_name[__entry->old_placement],
xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);
DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_STRUCT__entry(
__field(u64, ctx)
__field(u32, seqno)
__field(u64, fence)
__field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
__entry->fence = (unsigned long)fence;
__entry->fence = fence;
),
TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
__entry->ctx, __entry->fence, __entry->seqno)
);
@@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_ARGS(vma),
TP_STRUCT__entry(
__field(u64, vma)
__field(struct xe_vma *, vma)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -373,14 +396,14 @@
),
TP_fast_assign(
__entry->vma = (unsigned long)vma;
__entry->vma = vma;
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
__entry->vma, __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
__field(u64, vm)
__field(struct xe_vm *, vm)
__field(u32, asid)
),
TP_fast_assign(
__entry->vm = (unsigned long)vm;
__entry->vm = vm;
__entry->asid = vm->usm.asid;
),
TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
TP_printk("vm=%p, asid=0x%05x", __entry->vm,
__entry->asid)
);


@@ -897,6 +897,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
struct xe_device *xe = vm->xe;
bool read_only = xe_vma_read_only(vma);
if (vma->ufence) {
xe_sync_ufence_put(vma->ufence);
vma->ufence = NULL;
}
if (xe_vma_is_userptr(vma)) {
struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
@@ -1608,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
trace_xe_vma_unbind(vma);
if (vma->ufence) {
struct xe_user_fence * const f = vma->ufence;
if (!xe_sync_ufence_get_status(f))
return ERR_PTR(-EBUSY);
vma->ufence = NULL;
xe_sync_ufence_put(f);
}
if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
@@ -1741,6 +1756,21 @@ err_fences:
return ERR_PTR(err);
}
static struct xe_user_fence *
find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
{
unsigned int i;
for (i = 0; i < num_syncs; i++) {
struct xe_sync_entry *e = &syncs[i];
if (xe_sync_is_ufence(e))
return xe_sync_ufence_get(e);
}
return NULL;
}
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool immediate, bool first_op,
@@ -1748,9 +1778,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
{
struct dma_fence *fence;
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
struct xe_user_fence *ufence;
xe_vm_assert_held(vm);
ufence = find_ufence_get(syncs, num_syncs);
if (vma->ufence && ufence)
xe_sync_ufence_put(vma->ufence);
vma->ufence = ufence ?: vma->ufence;
if (immediate) {
fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
last_op);
@@ -2117,10 +2154,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2313,8 +2346,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
flags |= op->map.read_only ?
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
@@ -2445,7 +2476,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
op->map.immediate || !xe_vm_in_fault_mode(vm),
!xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2720,14 +2751,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
#define SUPPORTED_FLAGS \
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
#define MAX_BINDS 512 /* FIXME: Picking random upper limit */
static int vm_bind_ioctl_check_args(struct xe_device *xe,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
@@ -2739,16 +2767,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->extensions) ||
XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
if (args->num_binds > 1) {
u64 __user *bind_user =
u64_to_user_ptr(args->vector_of_binds);
*bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
args->num_binds, GFP_KERNEL);
*bind_ops = kvmalloc_array(args->num_binds,
sizeof(struct drm_xe_vm_bind_op),
GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
return -ENOMEM;
@@ -2838,7 +2866,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
free_bind_ops:
if (args->num_binds > 1)
kfree(*bind_ops);
kvfree(*bind_ops);
return err;
}
@@ -2926,13 +2954,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (args->num_binds) {
bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
bos = kvcalloc(args->num_binds, sizeof(*bos),
GFP_KERNEL | __GFP_ACCOUNT);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
ops = kvcalloc(args->num_binds, sizeof(*ops),
GFP_KERNEL | __GFP_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -3073,10 +3103,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; bos && i < args->num_binds; ++i)
xe_bo_put(bos[i]);
kfree(bos);
kfree(ops);
kvfree(bos);
kvfree(ops);
if (args->num_binds > 1)
kfree(bind_ops);
kvfree(bind_ops);
return err;
@@ -3100,10 +3130,10 @@ put_exec_queue:
if (q)
xe_exec_queue_put(q);
free_objs:
kfree(bos);
kfree(ops);
kvfree(bos);
kvfree(ops);
if (args->num_binds > 1)
kfree(bind_ops);
kvfree(bind_ops);
return err;
}


@@ -19,6 +19,7 @@
struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
@@ -104,6 +105,12 @@ struct xe_vma {
* @pat_index: The pat index to use when encoding the PTEs for this vma.
*/
u16 pat_index;
/**
* @ufence: The user fence that was provided with MAP.
* Needs to be signalled before UNMAP can be processed.
*/
struct xe_user_fence *ufence;
};
/**
@@ -288,10 +295,6 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
/** @immediate: Immediate bind */
bool immediate;
/** @read_only: Read only */
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @pat_index: The pat index to use for this operation. */


@@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = {
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
.reserve_vblank_syncpts = false,
.skip_reset_assert = true,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev)
host1x_intr_stop(host);
host1x_syncpt_save(host);
err = reset_control_bulk_assert(host->nresets, host->resets);
if (err) {
dev_err(dev, "failed to assert reset: %d\n", err);
goto resume_host1x;
}
if (!host->info->skip_reset_assert) {
err = reset_control_bulk_assert(host->nresets, host->resets);
if (err) {
dev_err(dev, "failed to assert reset: %d\n", err);
goto resume_host1x;
}
usleep_range(1000, 2000);
usleep_range(1000, 2000);
}
clk_disable_unprepare(host->clk);
reset_control_bulk_release(host->nresets, host->resets);


@@ -116,6 +116,12 @@ struct host1x_info {
* the display driver disables VBLANK increments.
*/
bool reserve_vblank_syncpts;
/*
* On Tegra186, secure world applications may require access to
* host1x during suspend/resume. To allow this, we need to leave
* host1x not in reset.
*/
bool skip_reset_assert;
};
struct host1x {


@@ -265,10 +265,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->client_mask = *match_data;
pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
if (IS_ERR(pg->pdr)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
"failed to initialize pdr\n");
return ret;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
if (ret)
return ret;
goto out_release_pdr_handle;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@@ -281,17 +288,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
goto out_release_altmode_aux;
}
pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
if (IS_ERR(pg->pdr)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
goto out_release_aux_devices;
}
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
if (IS_ERR(service)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
"failed adding pdr lookup for charger_pd\n");
goto out_release_pdr_handle;
goto out_release_aux_devices;
}
mutex_lock(&__pmic_glink_lock);
@@ -300,8 +301,6 @@
return 0;
out_release_pdr_handle:
pdr_handle_release(pg->pdr);
out_release_aux_devices:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
pmic_glink_del_aux_device(pg, &pg->ps_aux);
@@ -311,6 +310,8 @@ out_release_altmode_aux:
out_release_ucsi_aux:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
out_release_pdr_handle:
pdr_handle_release(pg->pdr);
return ret;
}


@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
struct device *bridge;
struct auxiliary_device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,7 +230,7 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
drm_aux_hpd_bridge_notify(alt_port->bridge,
drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
alt_port->hpd_state ?
connector_status_connected :
connector_status_disconnected);
@@ -454,7 +454,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
if (IS_ERR(alt_port->bridge)) {
fwnode_handle_put(fwnode);
return PTR_ERR(alt_port->bridge);
@@ -510,6 +510,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
}
}
for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
alt_port = &altmode->ports[port];
if (!alt_port->bridge)
continue;
ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
if (ret)
return ret;
}
altmode->client = devm_pmic_glink_register_client(dev,
altmode->owner_id,
pmic_glink_altmode_callback,


@@ -2399,11 +2399,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
char *old_data = NULL;
u8 *old_data = vc->vc_font.data;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
old_userfont = p->userfont;
if ((p->userfont = userfont))
@@ -2437,13 +2435,13 @@
update_screen(vc);
}
if (old_data && (--REFCOUNT(old_data) == 0))
if (old_userfont && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
err_out:
p->fontdata = old_data;
vc->vc_font.data = (void *)old_data;
vc->vc_font.data = old_data;
if (userfont) {
p->userfont = old_userfont;


@@ -9,6 +9,8 @@
#include <drm/drm_connector.h>
struct auxiliary_device;
#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
int drm_aux_bridge_register(struct device *parent);
#else
@@ -19,10 +21,23 @@ static inline int drm_aux_bridge_register(struct device *parent)
#endif
#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np);
int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev);
struct device *drm_dp_hpd_bridge_register(struct device *parent,
struct device_node *np);
void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
#else
static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent,
struct device_node *np)
{
return NULL;
}
static inline int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
{
return 0;
}
static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
struct device_node *np)
{


@@ -831,11 +831,6 @@ struct drm_xe_vm_destroy {
* - %DRM_XE_VM_BIND_OP_PREFETCH
*
* and the @flags can be:
* - %DRM_XE_VM_BIND_FLAG_READONLY
* - %DRM_XE_VM_BIND_FLAG_ASYNC
* - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the
* MAP operation immediately rather than deferring the MAP to the page
* fault handler.
* - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
* tables are setup with a special bit which indicates writes are
* dropped and all reads return zero. In the future, the NULL flags
@@ -928,9 +923,8 @@ struct drm_xe_vm_bind_op {
/** @op: Bind operation to perform */
__u32 op;
#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
/** @flags: Bind flags */
__u32 flags;
@@ -1045,19 +1039,6 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
/* Monitor 128KB contiguous region with 4K sub-granularity */
#define DRM_XE_ACC_GRANULARITY_128K 0
/* Monitor 2MB contiguous region with 64KB sub-granularity */
#define DRM_XE_ACC_GRANULARITY_2M 1
/* Monitor 16MB contiguous region with 512KB sub-granularity */
#define DRM_XE_ACC_GRANULARITY_16M 2
/* Monitor 64MB contiguous region with 2M sub-granularity */
#define DRM_XE_ACC_GRANULARITY_64M 3
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
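
By way of illustration, a userspace bind request that opts a buffer into
dump capture would set the new flag on its bind op. A fragment (the object
handle, GPU address and range members of struct drm_xe_vm_bind_op are elided
here):

#include <drm/xe_drm.h>

/* Mark the mapping's backing object as dumpable so it is captured in a
 * devcoredump after a GPU hang. */
struct drm_xe_vm_bind_op bind_op = {
        /* ... .obj, .addr, .range and friends elided ... */
        .op = DRM_XE_VM_BIND_OP_MAP,
        .flags = DRM_XE_VM_BIND_FLAG_DUMPABLE,
};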