Merge tag 'drm-fixes-2024-02-23' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This is the weekly drm fixes. Non-drivers there is a fbdev/sparc fix,
  syncobj, ttm and buddy fixes.

  On the driver side, ivpu, meson and i915 each have a small fix, while
  amdgpu and xe have a bunch. Nouveau has some minor uapi additions to
  give userspace some useful info along with a Kconfig change to allow
  the new GSP firmware paths to be used by default on the GPUs it
  supports.

  Seems about the usual amount for this point in the release cycle.

  fbdev:
   - fix sparc undefined reference

  syncobj:
   - fix sync obj fence waiting
   - handle NULL fence in syncobj eventfd code

  ttm:
   - fix invalid free

  buddy:
   - fix list handling
   - fix 32-bit build

  meson:
   - don't remove bridges from other drivers

  nouveau:
   - fix build warnings
   - add two minor info parameters
   - add a Kconfig to allow GSP by default on some GPUs

  ivpu:
   - allow fw to do initial tile config

  i915:
   - fix TV mode

  amdgpu:
   - Suspend/resume fixes
   - Backlight error fix
   - DCN 3.5 fixes
   - Misc fixes

  xe:
   - Remove support for persistent exec_queues
   - Drop a redundant newline in a sysfs printout
   - A three-patch fix for a VM_BIND rebind optimization path
   - Fix a modpost warning on an xe KUnit module"

* tag 'drm-fixes-2024-02-23' of git://anongit.freedesktop.org/drm/drm: (27 commits)
  nouveau: add an ioctl to report vram usage
  nouveau: add an ioctl to return vram bar size.
  nouveau/gsp: add kconfig option to enable GSP paths by default
  drm/amdgpu: Fix the runtime resume failure issue
  drm/amd/display: fix null-pointer dereference on edid reading
  drm/amd/display: Fix memory leak in dm_sw_fini()
  drm/amd/display: fix input states translation error for dcn35 & dcn351
  drm/amd/display: Fix potential null pointer dereference in dc_dmub_srv
  drm/amd/display: Only allow dig mapping to pwrseq in new asic
  drm/amd/display: adjust few initialization order in dm
  drm/syncobj: handle NULL fence in syncobj_eventfd_entry_func
  drm/syncobj: call drm_syncobj_fence_add_wait when WAIT_AVAILABLE flag is set
  drm/ttm: Fix an invalid freeing on already freed page in error path
  sparc: Fix undefined reference to fb_is_primary_device
  drm/xe: Fix modpost warning on xe_mocs kunit module
  drm/xe/xe_gt_idle: Drop redundant newline in name
  drm/xe: Return 2MB page size for compact 64k PTEs
  drm/xe: Add XE_VMA_PTE_64K VMA flag
  drm/xe: Fix xe_vma_set_pte_size
  drm/xe/uapi: Remove support for persistent exec_queues
  ...
Linus Torvalds 2024-02-23 09:17:47 -08:00
commit 06b7ef70b1
40 changed files with 183 additions and 190 deletions

View File

@ -60,7 +60,7 @@ libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
drivers-$(CONFIG_FB) += arch/sparc/video/
drivers-$(CONFIG_FB_CORE) += arch/sparc/video/
boot := arch/sparc/boot

View File

@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_FB) += fbdev.o
obj-$(CONFIG_FB_CORE) += fbdev.o

View File

@ -24,7 +24,7 @@
#define SKU_HW_ID_SHIFT 16u
#define SKU_HW_ID_MASK 0xffff0000u
#define PLL_CONFIG_DEFAULT 0x1
#define PLL_CONFIG_DEFAULT 0x0
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ (50 * 1000000)

View File

@ -1528,6 +1528,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
*/
void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
{
if (adev->in_runpm)
return;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))

View File

@ -1843,21 +1843,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
* It is expected that DMUB will resend any pending notifications at this point, for
* example HPD from DPIA.
*/
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
* It is expected that DMUB will resend any pending notifications at this point. Note
* that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
* align legacy interface initialization sequence. Connection status will be proactivly
* detected once in the amdgpu_dm_initialize_drm_device.
*/
dc_enable_dmub_outbox(adev->dm.dc);
/* DPIA trace goes to dmesg logs only if outbox is enabled */
@ -2287,6 +2278,7 @@ static int dm_sw_fini(void *handle)
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}
@ -3536,6 +3528,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
}
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
@ -3564,10 +3564,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
handle_hpd_rx_irq,
(void *) aconnector);
}
if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
aconnector;
}
}
@ -4561,6 +4557,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
if (dm->hpd_rx_offload_wq)
dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
@ -6534,10 +6534,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
struct edid *edid;
struct i2c_adapter *ddc;
if (dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@ -6545,7 +6550,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@ -6586,12 +6591,18 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
struct edid *edid;
struct i2c_adapter *ddc;
if (dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@ -6599,7 +6610,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;

View File

@ -125,7 +125,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
struct dc_context *dc_ctx;
struct dmub_srv *dmub;
enum dmub_status status;
int i;
@ -133,6 +133,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
dc_ctx = dc_dmub_srv->ctx;
dmub = dc_dmub_srv->dmub;
for (i = 0 ; i < count; i++) {
@ -1161,7 +1162,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
struct dc_context *dc_ctx;
enum dmub_status status;
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
@ -1170,6 +1171,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
return true;
dc_ctx = dc_dmub_srv->ctx;
if (wait) {
if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
do {
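
Both hunks above fix the same defect: dc_ctx was initialized from dc_dmub_srv->ctx in its declaration, which dereferences the pointer before the !dc_dmub_srv guard can run. A minimal standalone model of the pattern, with hypothetical names (not code from the patch):

#include <stdbool.h>
#include <stddef.h>

struct ctx { int id; };
struct srv { struct ctx *ctx; };

/* Buggy shape: the initializer dereferences s before the guard. */
static bool use_buggy(struct srv *s)
{
        struct ctx *c = s->ctx;   /* NULL dereference happens here */

        if (!s)
                return false;     /* never reached for a NULL input */
        return c->id == 0;
}

/* Fixed shape, as in the patch: declare, guard, then assign. */
static bool use_fixed(struct srv *s)
{
        struct ctx *c;

        if (!s)
                return false;
        c = s->ctx;
        return c->id == 0;
}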

View File

@ -290,4 +290,5 @@ void dce_panel_cntl_construct(
dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
dce_panel_cntl->base.ctx = init_data->ctx;
dce_panel_cntl->base.inst = init_data->inst;
dce_panel_cntl->base.pwrseq_inst = 0;
}

View File

@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct(
dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs;
dcn301_panel_cntl->base.ctx = init_data->ctx;
dcn301_panel_cntl->base.inst = init_data->inst;
dcn301_panel_cntl->base.pwrseq_inst = 0;
}

View File

@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
uint8_t pwrseq_inst = 0xF;
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
switch (init_data->eng_id) {
case ENGINE_ID_DIGA:
pwrseq_inst = 0;
break;
case ENGINE_ID_DIGB:
pwrseq_inst = 1;
break;
default:
DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
ASSERT(false);
break;
}
dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
}

View File

@ -398,7 +398,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
}
@ -437,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
// Override last out_state with data from last in_state
// This will ensure that out_state contains max fclk
memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
&p->in_states->state_array[p->in_states->num_states - 1],
sizeof(struct soc_state_bounding_box_st));
}
}
void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out)

View File

@ -57,7 +57,7 @@ struct panel_cntl_funcs {
struct panel_cntl_init_data {
struct dc_context *ctx;
uint32_t inst;
uint32_t pwrseq_inst;
uint32_t eng_id;
};
struct panel_cntl {

View File

@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
{
uint8_t pwrseq_inst = 0xF;
struct dc_context *dc_ctx = link->dc->ctx;
DC_LOGGER_INIT(dc_ctx->logger);
switch (link->eng_id) {
case ENGINE_ID_DIGA:
pwrseq_inst = 0;
break;
case ENGINE_ID_DIGB:
pwrseq_inst = 1;
break;
default:
DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
ASSERT(false);
break;
}
return pwrseq_inst;
}
static void link_destruct(struct dc_link *link)
{
int i;
@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link,
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
panel_cntl_init_data.eng_id = link->eng_id;
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);

View File

@ -538,13 +538,13 @@ static int __alloc_range(struct drm_buddy *mm,
list_add(&block->left->tmp_link, dfs);
} while (1);
list_splice_tail(&allocated, blocks);
if (total_allocated < size) {
err = -ENOSPC;
goto err_free;
}
list_splice_tail(&allocated, blocks);
return 0;
err_undo:
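
The reordering above is the whole fix: the old code spliced the locally built "allocated" list onto the caller's "blocks" list before the size check, so the -ENOSPC path then freed blocks that had already been handed to the caller. A kernel-style sketch of the ownership rule (the do_allocate/free_blocks helpers are hypothetical):

static int alloc_range_sketch(struct list_head *blocks, u64 size)
{
        LIST_HEAD(allocated);
        u64 total_allocated = do_allocate(&allocated, size);  /* hypothetical */

        if (total_allocated < size) {
                free_blocks(&allocated);  /* error path still owns the list */
                return -ENOSPC;
        }

        /* Hand ownership to the caller only once success is certain. */
        list_splice_tail(&allocated, blocks);
        return 0;
}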

View File

@ -1040,7 +1040,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@ -1109,7 +1110,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fallthough and try a 0 timeout wait!
*/
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
@ -1416,10 +1418,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
/* This happens inside the syncobj lock */
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
if (!fence)
return;
ret = dma_fence_chain_find_seqno(&fence, entry->point);
if (ret != 0 || !fence) {
if (ret != 0) {
/* The given seqno has not been submitted yet. */
dma_fence_put(fence);
return;
} else if (!fence) {
/* If dma_fence_chain_find_seqno returns 0 but sets the fence
* to NULL, it implies that the given seqno is signaled and a
* later seqno has already been submitted. Assign a stub fence
* so that the eventfd still gets signaled below.
*/
fence = dma_fence_get_stub();
}
list_del_init(&entry->node);
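
The eventfd hunk now distinguishes all three outcomes of the timeline lookup. One way to read the resulting logic, refactored into a standalone helper (illustrative only, not code from the patch):

/* Resolve a syncobj timeline point to a waitable fence, mirroring
 * syncobj_eventfd_entry_func. Returns NULL only when the point has
 * not been submitted yet. */
static struct dma_fence *
resolve_point_fence(struct drm_syncobj *syncobj, u64 point)
{
        struct dma_fence *fence;
        int ret;

        /* Called under the syncobj lock, hence the locked dereference. */
        fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
        if (!fence)
                return NULL;                    /* nothing submitted at all */

        ret = dma_fence_chain_find_seqno(&fence, point);
        if (ret != 0) {
                dma_fence_put(fence);           /* point not submitted yet */
                return NULL;
        }
        if (!fence)
                fence = dma_fence_get_stub();   /* point already signaled */
        return fence;
}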

View File

@ -1209,7 +1209,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_tv_format format;
u32 format_map;
format_map = 1 << conn_state->tv.mode;
format_map = 1 << conn_state->tv.legacy_mode;
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
@ -2298,7 +2298,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
* Read the list of supported input resolutions for the selected TV
* format.
*/
format_map = 1 << conn_state->tv.mode;
format_map = 1 << conn_state->tv.legacy_mode;
memcpy(&tv_res, &format_map,
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
@ -2363,7 +2363,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
int i;
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
*val = i;
return 0;
@ -2419,7 +2419,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
if (state->crtc) {
struct drm_crtc_state *crtc_state =
@ -3076,7 +3076,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
drm_property_add_enum(intel_sdvo_connector->tv_format, i,
tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;

View File

@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
int format = conn_state->tv.mode;
int format = conn_state->tv.legacy_mode;
return &tv_modes[format];
}
@ -1704,7 +1704,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
break;
}
connector->state->tv.mode = i;
connector->state->tv.legacy_mode = i;
}
static int
@ -1859,7 +1859,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
old_state = drm_atomic_get_old_connector_state(state, connector);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (old_state->tv.mode != new_state->tv.mode ||
if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
@ -1896,7 +1896,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
conn_state->tv.margins.right = 46;
conn_state->tv.margins.bottom = 37;
conn_state->tv.mode = 0;
conn_state->tv.legacy_mode = 0;
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@ -1910,7 +1910,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
drm_object_attach_property(&connector->base,
i915->drm.mode_config.legacy_tv_mode_property,
conn_state->tv.mode);
conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
conn_state->tv.margins.left);

View File

@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}

View File

@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_DSI]) {
meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
drm_bridge_remove(&meson_encoder_dsi->bridge);
drm_bridge_remove(meson_encoder_dsi->next_bridge);
}
}

View File

@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}

View File

@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM
help
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
config DRM_NOUVEAU_GSP_DEFAULT
bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
depends on DRM_NOUVEAU
default n
help
Say Y here if you want to use the GSP codepaths by default on
Turing and Ampere GPUs.

View File

@ -199,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@ -263,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
break;
}
case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
break;
case NOUVEAU_GETPARAM_VRAM_USED: {
struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
getparam->value = (u64)ttm_resource_manager_usage(vram_mgr) << PAGE_SHIFT;
break;
}
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;

View File

@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
rm->flush = r535_bar_flush;
ret = gf100_bar_new_(rm, device, type, inst, &bar);
*pbar = bar;
if (ret) {
if (!bar)
kfree(rm);
kfree(rm);
return ret;
}
*pbar = bar;
bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)

View File

@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
static void
shadow_fw_release(void *fw)
{
release_firmware(fw);
}
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
.fini = (void(*)(void *))release_firmware,
.fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
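
This is one of the nouveau "fix build warnings" commits: .fini is a void (*)(void *) slot, and casting release_firmware() (which takes const struct firmware *) into it trips clang's function-pointer cast warning, and such mismatched indirect calls also trap under kernel CFI. A standalone model of the pattern, with made-up names:

struct blob { int payload; };

static void release_blob(const struct blob *b) { (void)b; }

typedef void (*fini_fn)(void *);

/* Old pattern: compiles, but an indirect call through a mismatched
 * prototype is what -Wcast-function-type-strict (and CFI) object to. */
static const fini_fn bad_fini = (fini_fn)release_blob;

/* New pattern: a thunk whose prototype matches the slot exactly. */
static void release_blob_thunk(void *b)
{
        release_blob(b);
}

static const fini_fn good_fini = release_blob_thunk;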

View File

@ -2312,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
int ret;
bool enable_gsp = fwif->enable;
if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
enable_gsp = true;
#endif
if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
return -EINVAL;
if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
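
Putting the two nouveau GSP hunks together: the baseline default is the firmware table's fwif->enable, CONFIG_DRM_NOUVEAU_GSP_DEFAULT=y flips that default to on, and the NvGspRm config option still overrides the default either way at load time. Assuming the usual nouveau "config=" module option syntax, that override would look like this on the kernel command line (illustrative values):

nouveau.config=NvGspRm=1    # force the GSP firmware paths on
nouveau.config=NvGspRm=0    # force them off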

View File

@ -55,30 +55,30 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test,
drm_buddy_alloc_blocks(&mm, 0, mm_size,
ps, ps, list, 0),
"buddy_alloc hit an error size=%d\n",
"buddy_alloc hit an error size=%u\n",
ps);
} while (++i < n_pages);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%d\n", 3 * ps);
"buddy_alloc didn't error size=%u\n", 3 * ps);
drm_buddy_free_list(&mm, &middle);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%llu\n", 3 * ps);
"buddy_alloc didn't error size=%u\n", 3 * ps);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
2 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%llu\n", 2 * ps);
"buddy_alloc didn't error size=%u\n", 2 * ps);
drm_buddy_free_list(&mm, &right);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%llu\n", 3 * ps);
"buddy_alloc didn't error size=%u\n", 3 * ps);
/*
* At this point we should have enough contiguous space for 2 blocks,
* however they are never buddies (since we freed middle and right) so
@ -87,13 +87,13 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
2 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc hit an error size=%d\n", 2 * ps);
"buddy_alloc hit an error size=%u\n", 2 * ps);
drm_buddy_free_list(&mm, &left);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc hit an error size=%d\n", 3 * ps);
"buddy_alloc hit an error size=%u\n", 3 * ps);
total = 0;
list_for_each_entry(block, &allocated, link)

View File

@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = tt->pages;
struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;
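
This matches the "invalid freeing on already freed page" subject: pages pointed at the base of tt->pages while the loop counted from start_page, so freeing the range [start_page, end_page) actually walked entries from index 0, some of which an earlier call had already released. A minimal userspace model of the off-by-base bug (hypothetical names):

#include <stdlib.h>

/* Free entries [start, end) of an array of malloc()ed pointers. */
static void free_range(void **array, size_t start, size_t end)
{
        void **pages = &array[start];   /* the fix: offset by start */
        size_t i;

        for (i = 0; i < end - start; i++) {
                free(pages[i]);         /* old code effectively freed array[i] */
                pages[i] = NULL;
        }
}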

View File

@ -21,4 +21,5 @@ kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe_mocs kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
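
The added MODULE_IMPORT_NS() line is exactly what modpost was complaining about: the test module calls symbols exported into the EXPORTED_FOR_KUNIT_TESTING namespace, and every consumer of a namespaced export must import that namespace explicitly. The general contract, with illustrative names:

/* In the exporting module: place the symbol in a namespace. */
EXPORT_SYMBOL_NS_GPL(my_test_hook, MY_TEST_NS);

/* In each consumer: declare the import, or modpost warns that the
 * module uses my_test_hook without importing MY_TEST_NS. */
MODULE_IMPORT_NS(MY_TEST_NS);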

View File

@ -83,9 +83,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
static void device_kill_persistent_exec_queues(struct xe_device *xe,
struct xe_file *xef);
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@ -102,8 +99,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&xef->exec_queue.lock);
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
device_kill_persistent_exec_queues(xe, xef);
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
@ -255,9 +250,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
INIT_LIST_HEAD(&xe->persistent_engines.list);
spin_lock_init(&xe->pinned.lock);
INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
INIT_LIST_HEAD(&xe->pinned.external_vram);
@ -570,37 +562,6 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
{
mutex_lock(&xe->persistent_engines.lock);
list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
mutex_unlock(&xe->persistent_engines.lock);
}
void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
struct xe_exec_queue *q)
{
mutex_lock(&xe->persistent_engines.lock);
if (!list_empty(&q->persistent.link))
list_del(&q->persistent.link);
mutex_unlock(&xe->persistent_engines.lock);
}
static void device_kill_persistent_exec_queues(struct xe_device *xe,
struct xe_file *xef)
{
struct xe_exec_queue *q, *next;
mutex_lock(&xe->persistent_engines.lock);
list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
persistent.link)
if (q->persistent.xef == xef) {
xe_exec_queue_kill(q);
list_del_init(&q->persistent.link);
}
mutex_unlock(&xe->persistent_engines.lock);
}
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);

View File

@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
struct xe_exec_queue *q);
void xe_device_wmb(struct xe_device *xe);
static inline struct xe_file *to_xe_file(const struct drm_file *file)

View File

@ -341,14 +341,6 @@ struct xe_device {
struct mutex lock;
} usm;
/** @persistent_engines: engines that are closed but still running */
struct {
/** @lock: protects persistent engines */
struct mutex lock;
/** @list: list of persistent engines */
struct list_head list;
} persistent_engines;
/** @pinned: pinned BO state */
struct {
/** @lock: protected pinned BO list state */

View File

@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
INIT_LIST_HEAD(&q->persistent.link);
INIT_LIST_HEAD(&q->compute.link);
INIT_LIST_HEAD(&q->multi_gt_link);
@ -326,23 +325,6 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe,
return q->ops->set_preempt_timeout(q, value);
}
static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
return -EINVAL;
if (value)
q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
else
q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
return 0;
}
static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
@ -414,7 +396,6 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
@ -441,6 +422,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
if (!exec_queue_set_property_funcs[idx])
return -EINVAL;
return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}
@ -704,9 +688,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
args->width, hwe,
xe_vm_in_lr_mode(vm) ? 0 :
EXEC_QUEUE_FLAG_PERSISTENT);
args->width, hwe, 0);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(q))
@ -728,8 +710,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
goto kill_exec_queue;
}
q->persistent.xef = xef;
mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock);
@ -872,10 +852,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
xe_exec_queue_kill(q);
else
xe_device_add_persistent_exec_queues(xe, q);
xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);

View File

@ -105,16 +105,6 @@ struct xe_exec_queue {
struct xe_guc_exec_queue *guc;
};
/**
* @persistent: persistent exec queue state
*/
struct {
/** @xef: file which this exec queue belongs to */
struct xe_file *xef;
/** @link: link in list of persistent exec queues */
struct list_head link;
} persistent;
union {
/**
* @parallel: parallel submission state

View File

@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
xe_device_remove_persistent_exec_queues(xe, q);
drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched);
kfree(exl);

View File

@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
}
if (xe_gt_is_media_type(gt)) {
sprintf(gtidle->name, "gt%d-mc\n", gt->info.id);
sprintf(gtidle->name, "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
} else {
sprintf(gtidle->name, "gt%d-rc\n", gt->info.id);
sprintf(gtidle->name, "gt%d-rc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_rc6_residency;
}

View File

@ -1028,8 +1028,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);

View File

@ -499,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
* this device *requires* 64K PTE size for VRAM, fail.
*/
if (level == 0 && !xe_parent->is_compact) {
if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
pte |= XE_PTE_PS64;
else if (XE_WARN_ON(xe_walk->needs_64K))
} else if (XE_WARN_ON(xe_walk->needs_64K)) {
return -EINVAL;
}
}
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
@ -545,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
*child = &xe_child->base;
/*
* Prefer the compact pagetable layout for L0 if possible.
* Prefer the compact pagetable layout for L0 if possible. Only
* possible if VMA covers entire 2MB region as compact 64k and
* 4k pages cannot be mixed within a 2MB region.
* TODO: Suballocate the pt bo to avoid wasting a lot of
* memory.
*/
if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
flags |= XE_PDE_64K;
xe_child->is_compact = true;
}
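
The new flags tie the xe PTE-size hunks together: when the walk selects compact 64K PTEs for a whole 2MB region it records XE_VMA_PTE_COMPACT (or XE_VMA_PTE_64K for plain 64K PTEs) on the VMA, and xe_vma_max_pte_size() in the following hunk maps that back to the granularity a rebind must preserve:

  XE_VMA_PTE_1G                        -> SZ_1G
  XE_VMA_PTE_2M or XE_VMA_PTE_COMPACT  -> SZ_2M  (compact 64K PTEs span a full 2MB PDE)
  XE_VMA_PTE_64K                       -> SZ_64K
  XE_VMA_PTE_4K                        -> SZ_4K
  no flag set                          -> SZ_1G  (uninitialized; use max)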

View File

@ -2190,15 +2190,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
if (vma->gpuva.flags & XE_VMA_PTE_1G)
return SZ_1G;
else if (vma->gpuva.flags & XE_VMA_PTE_2M)
else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
return SZ_2M;
else if (vma->gpuva.flags & XE_VMA_PTE_64K)
return SZ_64K;
else if (vma->gpuva.flags & XE_VMA_PTE_4K)
return SZ_4K;
return SZ_1G; /* Uninitialized, used max size */
}
static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
switch (size) {
case SZ_1G:
@ -2207,9 +2209,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
case SZ_2M:
vma->gpuva.flags |= XE_VMA_PTE_2M;
break;
case SZ_64K:
vma->gpuva.flags |= XE_VMA_PTE_64K;
break;
case SZ_4K:
vma->gpuva.flags |= XE_VMA_PTE_4K;
break;
}
return SZ_4K;
}
static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)

View File

@ -29,6 +29,8 @@ struct xe_vm;
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
/** struct xe_userptr - User pointer */
struct xe_userptr {

View File

@ -54,6 +54,20 @@ extern "C" {
*/
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
/*
* NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
*
* Query the VRAM BAR size.
*/
#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
/*
* NOUVEAU_GETPARAM_VRAM_USED
*
* Get remaining VRAM size.
*/
#define NOUVEAU_GETPARAM_VRAM_USED 19
struct drm_nouveau_getparam {
__u64 param;
__u64 value;
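
For userspace, both new parameters go through the long-standing getparam ioctl. A hedged sketch of a query, assuming an already-open nouveau DRM fd and the uapi header that defines DRM_IOCTL_NOUVEAU_GETPARAM:

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>   /* NOUVEAU_GETPARAM_*, struct drm_nouveau_getparam */

/* Query VRAM usage and BAR size on an open nouveau DRM fd. */
static int print_vram_info(int fd)
{
        struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_VRAM_USED };

        if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp))
                return -1;
        printf("VRAM used: %llu bytes\n", (unsigned long long)gp.value);

        gp.param = NOUVEAU_GETPARAM_VRAM_BAR_SIZE;
        if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp))
                return -1;
        printf("VRAM BAR:  %llu bytes\n", (unsigned long long)gp.value);
        return 0;
}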

View File

@ -1046,7 +1046,6 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6