From 2e6c86be0e57079d1fb6c7c7e5423db096d0548a Mon Sep 17 00:00:00 2001 From: Xiaomeng Tong Date: Sun, 27 Mar 2022 13:53:55 +0800 Subject: [PATCH 001/219] stm: ltdc: fix two incorrect NULL checks on list iterator The two bugs are here: if (encoder) { if (bridge && bridge->timings) The list iterator value 'encoder/bridge' will *always* be set and non-NULL by drm_for_each_encoder()/list_for_each_entry(), so it is incorrect to assume that the iterator value will be NULL if the list is empty or no element is found. To fix the bug, use a new variable '*_iter' as the list iterator, while use the old variable 'encoder/bridge' as a dedicated pointer to point to the found element. Cc: stable@vger.kernel.org Fixes: 99e360442f223 ("drm/stm: Fix bus_flags handling") Signed-off-by: Xiaomeng Tong Acked-by: Raphael Gallais-Pou Signed-off-by: Philippe Cornu Link: https://patchwork.freedesktop.org/patch/msgid/20220327055355.3808-1-xiam0nd.tong@gmail.com --- drivers/gpu/drm/stm/ltdc.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 17cc050207f4..6bd45df8f5a7 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -869,8 +869,8 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) struct drm_device *ddev = crtc->dev; struct drm_connector_list_iter iter; struct drm_connector *connector = NULL; - struct drm_encoder *encoder = NULL; - struct drm_bridge *bridge = NULL; + struct drm_encoder *encoder = NULL, *en_iter; + struct drm_bridge *bridge = NULL, *br_iter; struct drm_display_mode *mode = &crtc->state->adjusted_mode; u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h; u32 total_width, total_height; @@ -880,15 +880,19 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) int ret; /* get encoder from crtc */ - drm_for_each_encoder(encoder, ddev) - if (encoder->crtc == crtc) + drm_for_each_encoder(en_iter, ddev) + if (en_iter->crtc == crtc) { + encoder = en_iter; break; + } if (encoder) { /* get bridge from encoder */ - list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) - if (bridge->encoder == encoder) + list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node) + if (br_iter->encoder == encoder) { + bridge = br_iter; break; + } /* Get the connector from encoder */ drm_connector_list_iter_begin(ddev, &iter); From 25d4cb51d6cde1229edb42f24a215f3ef8325393 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 26 Mar 2022 16:51:32 -0700 Subject: [PATCH 002/219] drm: sti: don't use kernel-doc markers Don't mark static functions as kernel-doc. Prevents multiple kernel-doc build warnings: drivers/gpu/drm/sti/sti_hdmi.c:187: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * HDMI interrupt handler threaded drivers/gpu/drm/sti/sti_hdmi.c:219: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * HDMI interrupt handler drivers/gpu/drm/sti/sti_hdmi.c:241: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Set hdmi active area depending on the drm display mode selected drivers/gpu/drm/sti/sti_hdmi.c:262: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Overall hdmi configuration drivers/gpu/drm/sti/sti_hdmi.c:340: warning: This comment starts with '/**', but isn't a kernel-doc comment. 
Refer Documentation/doc-guide/kernel-doc.rst * Helper to concatenate infoframe in 32 bits word drivers/gpu/drm/sti/sti_hdmi.c:357: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Helper to write info frame drivers/gpu/drm/sti/sti_hdmi.c:427: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Prepare and configure the AVI infoframe drivers/gpu/drm/sti/sti_hdmi.c:470: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Prepare and configure the AUDIO infoframe drivers/gpu/drm/sti/sti_hdmi.c:555: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Software reset of the hdmi subsystem Fixes: 5402626c83a2 ("drm: sti: add HDMI driver") Signed-off-by: Randy Dunlap Cc: Aditya Srivastava Cc: Benjamin Gaignard Cc: Alain Volmat Cc: David Airlie Cc: Daniel Vetter Acked-by: Alain Volmat Signed-off-by: Philippe Cornu Link: https://patchwork.freedesktop.org/patch/msgid/20220326235132.25192-1-rdunlap@infradead.org --- drivers/gpu/drm/sti/sti_hdmi.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index f3ace11209dd..78e099c8e805 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -183,7 +183,7 @@ void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset) writel(val, hdmi->regs + offset); } -/** +/* * HDMI interrupt handler threaded * * @irq: irq number @@ -215,7 +215,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg) return IRQ_HANDLED; } -/** +/* * HDMI interrupt handler * * @irq: irq number @@ -237,7 +237,7 @@ static irqreturn_t hdmi_irq(int irq, void *arg) return IRQ_WAKE_THREAD; } -/** +/* * Set hdmi active area depending on the drm display mode selected * * @hdmi: pointer on the hdmi internal structure @@ -258,7 +258,7 @@ static void hdmi_active_area(struct sti_hdmi *hdmi) hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX); } -/** +/* * Overall hdmi configuration * * @hdmi: pointer on the hdmi internal structure @@ -336,7 +336,7 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi, hdmi_write(hdmi, 0x0, pack_offset + i); } -/** +/* * Helper to concatenate infoframe in 32 bits word * * @ptr: pointer on the hdmi internal structure @@ -353,7 +353,7 @@ static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size) return value; } -/** +/* * Helper to write info frame * * @hdmi: pointer on the hdmi internal structure @@ -423,7 +423,7 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, hdmi_write(hdmi, val, HDMI_SW_DI_CFG); } -/** +/* * Prepare and configure the AVI infoframe * * AVI infoframe are transmitted at least once per two video field and @@ -466,7 +466,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) return 0; } -/** +/* * Prepare and configure the AUDIO infoframe * * AUDIO infoframe are transmitted once per frame and @@ -551,7 +551,7 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi) #define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */ -/** +/* * Software reset of the hdmi subsystem * * @hdmi: pointer on the hdmi internal structure @@ -785,7 +785,7 @@ static void sti_hdmi_disable(struct drm_bridge *bridge) cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID); } -/** +/* * sti_hdmi_audio_get_non_coherent_n() - 
get N parameter for non-coherent * clocks. None-coherent clocks means that audio and TMDS clocks have not the * same source (drifts between clocks). In this case assumption is that CTS is From 15431b110cc5984967646384ea1d47999c677ce3 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 4 Oct 2019 09:27:06 +0100 Subject: [PATCH 003/219] drm: sti: fix spelling mistake: rejec -> rejection In other places of the driver the string hdmi_rejection_pll is used instead of the truncated hdmi_rejec_pll, so use this string instead to be consistent. Signed-off-by: Colin Ian King Signed-off-by: Philippe Cornu Link: https://patchwork.freedesktop.org/patch/msgid/20191004082706.26478-1-colin.king@canonical.com --- drivers/gpu/drm/sti/sti_hdmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 78e099c8e805..b3fbee7eac11 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -892,7 +892,7 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge) if (clk_prepare_enable(hdmi->clk_tmds)) DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n"); if (clk_prepare_enable(hdmi->clk_phy)) - DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n"); + DRM_ERROR("Failed to prepare/enable hdmi_rejection_pll clk\n"); hdmi->enabled = true; From 6e87601b7e3e067a6a6c083914e8a109edcded86 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 14 Mar 2022 12:53:40 +0100 Subject: [PATCH 004/219] drm/sti: fix typos in comments Various spelling mistakes in comments. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Reviewed-by: Alain Volmat Signed-off-by: Philippe Cornu Link: https://patchwork.freedesktop.org/patch/msgid/20220314115354.144023-17-Julia.Lawall@inria.fr --- drivers/gpu/drm/sti/sti_gdp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 3db3768a3241..b58415f2e4d8 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -406,7 +406,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp) (hw_nvn != gdp->node_list[i].top_field_paddr)) return &gdp->node_list[i]; - /* in hazardious cases restart with the first node */ + /* in hazardous cases restart with the first node */ DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", sti_plane_to_str(&gdp->plane), hw_nvn); From 7bc80a5462c37eab58a9ea386064307c0f447fd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 9 Nov 2021 11:08:18 +0100 Subject: [PATCH 005/219] dma-buf: add enum dma_resv_usage v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change adds the dma_resv_usage enum and allows us to specify why a dma_resv object is queried for its containing fences. Additional to that a dma_resv_usage_rw() helper function is added to aid retrieving the fences for a read or write userspace submission. This is then deployed to the different query functions of the dma_resv object and all of their users. When the write paratermer was previously true we now use DMA_RESV_USAGE_WRITE and DMA_RESV_USAGE_READ otherwise. v2: add KERNEL/OTHER in separate patch v3: some kerneldoc suggestions by Daniel v4: some more kerneldoc suggestions by Daniel, fix missing cases lost in the rebase pointed out by Bas. 
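As a rough query-side illustration of what this patch describes, here is a minimal before/after sketch; the wait_idle_* wrapper names are invented for illustration, while the dma_resv calls and the true -> DMA_RESV_USAGE_READ, false -> DMA_RESV_USAGE_WRITE mapping follow the hunks below:

    #include <linux/dma-resv.h>

    /* Before: a bool selected between "all fences" and "exclusive fence only". */
    static long wait_idle_old(struct dma_resv *resv, bool write,
                              unsigned long timeout)
    {
            return dma_resv_wait_timeout(resv, write, true, timeout);
    }

    /* After: the caller states why it queries.  dma_resv_usage_rw() maps a
     * read/write userspace submission to the fences it has to wait for, and
     * call sites that passed a literal true/false now pass
     * DMA_RESV_USAGE_READ/DMA_RESV_USAGE_WRITE respectively.
     */
    static long wait_idle_new(struct dma_resv *resv, bool write,
                              unsigned long timeout)
    {
            return dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
                                         true, timeout);
    }
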
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-2-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 6 +- drivers/dma-buf/dma-resv.c | 35 +++++---- drivers/dma-buf/st-dma-resv.c | 48 ++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +- drivers/gpu/drm/drm_gem.c | 3 +- drivers/gpu/drm/drm_gem_atomic_helper.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6 +- .../gpu/drm/i915/display/intel_atomic_plane.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 6 +- .../drm/i915/gem/selftests/i915_gem_dmabuf.c | 3 +- drivers/gpu/drm/i915/i915_request.c | 3 +- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- drivers/gpu/drm/msm/msm_gem.c | 3 +- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 3 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 8 +- drivers/gpu/drm/nouveau/nouveau_fence.c | 8 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 3 +- drivers/gpu/drm/panfrost/panfrost_drv.c | 3 +- drivers/gpu/drm/qxl/qxl_debugfs.c | 3 +- drivers/gpu/drm/radeon/radeon_display.c | 3 +- drivers/gpu/drm/radeon/radeon_gem.c | 9 ++- drivers/gpu/drm/radeon/radeon_mn.c | 4 +- drivers/gpu/drm/radeon/radeon_sync.c | 2 +- drivers/gpu/drm/radeon/radeon_uvd.c | 4 +- drivers/gpu/drm/scheduler/sched_main.c | 3 +- drivers/gpu/drm/ttm/ttm_bo.c | 18 +++-- drivers/gpu/drm/vgem/vgem_fence.c | 4 +- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5 +- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 3 +- drivers/infiniband/core/umem_dmabuf.c | 3 +- include/linux/dma-buf.h | 8 +- include/linux/dma-resv.h | 73 +++++++++++++++---- 46 files changed, 215 insertions(+), 126 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 775d3afb4169..1cddb65eafda 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, struct dma_fence *fence; int r; - dma_resv_for_each_fence(&cursor, resv, write, fence) { + dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), + fence) { dma_fence_get(fence); r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); if (!r) @@ -1124,7 +1125,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, long ret; /* Wait on any implicit rendering fences */ - ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT); + ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), + true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 8c650b96357a..17237e6ee30c 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -384,7 +384,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) cursor->seq = read_seqcount_begin(&cursor->obj->seq); cursor->index = -1; cursor->shared_count = 0; - if (cursor->all_fences) { + if 
(cursor->usage >= DMA_RESV_USAGE_READ) { cursor->fences = dma_resv_shared_list(cursor->obj); if (cursor->fences) cursor->shared_count = cursor->fences->shared_count; @@ -496,7 +496,7 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor) dma_resv_assert_held(cursor->obj); cursor->index = 0; - if (cursor->all_fences) + if (cursor->usage >= DMA_RESV_USAGE_READ) cursor->fences = dma_resv_shared_list(cursor->obj); else cursor->fences = NULL; @@ -551,7 +551,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) list = NULL; excl = NULL; - dma_resv_iter_begin(&cursor, src, true); + dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, f) { if (dma_resv_iter_is_restarted(&cursor)) { @@ -597,7 +597,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * dma_resv_get_fences - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object - * @write: true if we should return all fences + * @usage: controls which fences to include, see enum dma_resv_usage. * @num_fences: the number of fences returned * @fences: the array of fence ptrs returned (array is krealloc'd to the * required size, and must be freed by caller) @@ -605,7 +605,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * Retrieve all fences from the reservation object. * Returns either zero or -ENOMEM. */ -int dma_resv_get_fences(struct dma_resv *obj, bool write, +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences) { struct dma_resv_iter cursor; @@ -614,7 +614,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write, *num_fences = 0; *fences = NULL; - dma_resv_iter_begin(&cursor, obj, write); + dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) { @@ -646,7 +646,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences); /** * dma_resv_get_singleton - Get a single fence for all the fences * @obj: the reservation object - * @write: true if we should return all fences + * @usage: controls which fences to include, see enum dma_resv_usage. * @fence: the resulting fence * * Get a single fence representing all the fences inside the resv object. @@ -658,7 +658,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences); * * Returns 0 on success and negative error values on failure. */ -int dma_resv_get_singleton(struct dma_resv *obj, bool write, +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, struct dma_fence **fence) { struct dma_fence_array *array; @@ -666,7 +666,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write, unsigned count; int r; - r = dma_resv_get_fences(obj, write, &count, &fences); + r = dma_resv_get_fences(obj, usage, &count, &fences); if (r) return r; @@ -700,7 +700,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton); * dma_resv_wait_timeout - Wait on reservation's objects * shared and/or exclusive fences. * @obj: the reservation object - * @wait_all: if true, wait on all fences, else wait on just exclusive fence + * @usage: controls which fences to include, see enum dma_resv_usage. * @intr: if true, do interruptible wait * @timeout: timeout value in jiffies or zero to return immediately * @@ -710,14 +710,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton); * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than zer on success. 
*/ -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout) +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, + bool intr, unsigned long timeout) { long ret = timeout ? timeout : 1; struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, obj, wait_all); + dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { ret = dma_fence_wait_timeout(fence, intr, ret); @@ -737,8 +737,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); * dma_resv_test_signaled - Test if a reservation object's fences have been * signaled. * @obj: the reservation object - * @test_all: if true, test all fences, otherwise only test the exclusive - * fence + * @usage: controls which fences to include, see enum dma_resv_usage. * * Callers are not required to hold specific locks, but maybe hold * dma_resv_lock() already. @@ -747,12 +746,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); * * True if all fences signaled, else false. */ -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage) { struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, obj, test_all); + dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { dma_resv_iter_end(&cursor); return false; @@ -775,7 +774,7 @@ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_for_each_fence(&cursor, obj, true, fence) { + dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { seq_printf(seq, "\t%s fence:", dma_resv_iter_is_exclusive(&cursor) ? "Exclusive" : "Shared"); diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index d2e61f6ae989..d097981061b1 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -58,7 +58,7 @@ static int sanitycheck(void *arg) return r; } -static int test_signaling(void *arg, bool shared) +static int test_signaling(void *arg, enum dma_resv_usage usage) { struct dma_resv resv; struct dma_fence *f; @@ -81,18 +81,18 @@ static int test_signaling(void *arg, bool shared) goto err_unlock; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); - if (dma_resv_test_signaled(&resv, shared)) { + if (dma_resv_test_signaled(&resv, usage)) { pr_err("Resv unexpectedly signaled\n"); r = -EINVAL; goto err_unlock; } dma_fence_signal(f); - if (!dma_resv_test_signaled(&resv, shared)) { + if (!dma_resv_test_signaled(&resv, usage)) { pr_err("Resv not reporting signaled\n"); r = -EINVAL; goto err_unlock; @@ -107,15 +107,15 @@ err_free: static int test_excl_signaling(void *arg) { - return test_signaling(arg, false); + return test_signaling(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_signaling(void *arg) { - return test_signaling(arg, true); + return test_signaling(arg, DMA_RESV_USAGE_READ); } -static int test_for_each(void *arg, bool shared) +static int test_for_each(void *arg, enum dma_resv_usage usage) { struct dma_resv_iter cursor; struct dma_fence *f, *fence; @@ -139,13 +139,13 @@ static int test_for_each(void *arg, bool shared) goto err_unlock; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); r = -ENOENT; - dma_resv_for_each_fence(&cursor, &resv, shared, fence) { + 
dma_resv_for_each_fence(&cursor, &resv, usage, fence) { if (!r) { pr_err("More than one fence found\n"); r = -EINVAL; @@ -156,7 +156,8 @@ static int test_for_each(void *arg, bool shared) r = -EINVAL; goto err_unlock; } - if (dma_resv_iter_is_exclusive(&cursor) != !shared) { + if (dma_resv_iter_is_exclusive(&cursor) != + (usage >= DMA_RESV_USAGE_READ)) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_unlock; @@ -178,15 +179,15 @@ err_free: static int test_excl_for_each(void *arg) { - return test_for_each(arg, false); + return test_for_each(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_for_each(void *arg) { - return test_for_each(arg, true); + return test_for_each(arg, DMA_RESV_USAGE_READ); } -static int test_for_each_unlocked(void *arg, bool shared) +static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) { struct dma_resv_iter cursor; struct dma_fence *f, *fence; @@ -211,14 +212,14 @@ static int test_for_each_unlocked(void *arg, bool shared) goto err_free; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); dma_resv_unlock(&resv); r = -ENOENT; - dma_resv_iter_begin(&cursor, &resv, shared); + dma_resv_iter_begin(&cursor, &resv, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (!r) { pr_err("More than one fence found\n"); @@ -234,7 +235,8 @@ static int test_for_each_unlocked(void *arg, bool shared) r = -EINVAL; goto err_iter_end; } - if (dma_resv_iter_is_exclusive(&cursor) != !shared) { + if (dma_resv_iter_is_exclusive(&cursor) != + (usage >= DMA_RESV_USAGE_READ)) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_iter_end; @@ -262,15 +264,15 @@ err_free: static int test_excl_for_each_unlocked(void *arg) { - return test_for_each_unlocked(arg, false); + return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_for_each_unlocked(void *arg) { - return test_for_each_unlocked(arg, true); + return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ); } -static int test_get_fences(void *arg, bool shared) +static int test_get_fences(void *arg, enum dma_resv_usage usage) { struct dma_fence *f, **fences = NULL; struct dma_resv resv; @@ -294,13 +296,13 @@ static int test_get_fences(void *arg, bool shared) goto err_resv; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); dma_resv_unlock(&resv); - r = dma_resv_get_fences(&resv, shared, &i, &fences); + r = dma_resv_get_fences(&resv, usage, &i, &fences); if (r) { pr_err("get_fences failed\n"); goto err_free; @@ -324,12 +326,12 @@ err_resv: static int test_excl_get_fences(void *arg) { - return test_get_fences(arg, false); + return test_get_fences(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_get_fences(void *arg) { - return test_get_fences(arg, true); + return test_get_fences(arg, DMA_RESV_USAGE_READ); } int dma_resv(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e85e347eb670..413f32c3fd63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1288,7 +1288,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, * * TODO: Remove together with dma_resv rework. 
*/ - dma_resv_for_each_fence(&cursor, resv, false, fence) { + dma_resv_for_each_fence(&cursor, resv, + DMA_RESV_USAGE_WRITE, + fence) { break; } dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index fae5c1debfad..7a6908d71820 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - /* TODO: Unify this with other drivers */ - r = dma_resv_get_fences(new_abo->tbo.base.resv, true, + r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE, &work->shared_count, &work->shared); if (unlikely(r != 0)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 57b74d35052f..84a53758e18e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, + true, timeout); /* ret == 0 means not signaled, * ret > 0 means signaled diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 81207737c716..65998cbcd7f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, struct dma_fence *fence; int r; - r = dma_resv_get_singleton(resv, true, &fence); + r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence); if (r) goto fallback; @@ -139,7 +139,8 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, + false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 4b153daf283d..86f5248676b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + false, MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6f57a2fd5fe3..a7f39f8ab7be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -768,8 +768,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, + false, MAX_SCHEDULE_TIMEOUT); if (r < 0) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 40e06745fae9..744e144e5fc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -259,7 +259,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, if (resv == NULL) return -EINVAL; - dma_resv_for_each_fence(&cursor, resv, true, f) { + /* TODO: Use DMA_RESV_USAGE_READ here */ + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) { dma_fence_chain_for_each(f, f) { struct dma_fence *tmp = dma_fence_chain_contained(f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f7f149588432..5db5066e74b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1344,7 +1344,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) { + dma_resv_for_each_fence(&resv_cursor, bo->base.resv, + DMA_RESV_USAGE_READ, f) { if (amdkfd_fence_check_mm(f, current->mm)) return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 39c74d9fa7cc..3654326219e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1163,7 +1163,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + r = dma_resv_wait_timeout(bo->tbo.base.resv, + DMA_RESV_USAGE_WRITE, false, msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b13451255e8b..a0376fd36a82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct dma_resv_iter cursor; struct dma_fence *fence; - 
dma_resv_for_each_fence(&cursor, resv, true, fence) { + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) { /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); amdgpu_vm_add_prt_cb(adev, fence); @@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ)) return false; /* Try to block ongoing updates */ @@ -2845,7 +2845,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, + DMA_RESV_USAGE_READ, true, timeout); if (timeout <= 0) return timeout; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b30656959fd8..9e24b1e616af 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9236,7 +9236,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * deadlock during GPU reset when this fence will not signal * but we hold reservation lock for the BO. */ - r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, + r = dma_resv_wait_timeout(abo->tbo.base.resv, + DMA_RESV_USAGE_WRITE, false, msecs_to_jiffies(5000)); if (unlikely(r <= 0)) DRM_ERROR("Waiting for fences timed out!"); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 133dfae06fab..eb0c2d041f13 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -771,7 +771,8 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, return -EINVAL; } - ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout); + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all), + true, timeout); if (ret == 0) ret = -ETIME; else if (ret > 0) diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index 9338ddb7edff..a6d89aed0bda 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -151,7 +151,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st return 0; obj = drm_gem_fb_get_obj(state->fb, 0); - ret = dma_resv_get_singleton(obj->resv, false, &fence); + ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence); if (ret) return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index d5314aa28ff7..507172e2780b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -380,12 +380,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (op & ETNA_PREP_NOSYNC) { - if (!dma_resv_test_signaled(obj->resv, write)) + if (!dma_resv_test_signaled(obj->resv, + dma_resv_usage_rw(write))) return -EBUSY; } else { unsigned long remain = etnaviv_timeout_to_jiffies(timeout); - ret = dma_resv_wait_timeout(obj->resv, write, true, remain); + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write), + true, remain); if (ret <= 0) return ret == 0 ? 
-ETIMEDOUT : ret; } diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 5712688232fb..03e86e836a17 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -997,7 +997,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane, if (ret < 0) goto unpin_fb; - dma_resv_iter_begin(&cursor, obj->base.resv, false); + dma_resv_iter_begin(&cursor, obj->base.resv, + DMA_RESV_USAGE_WRITE); dma_resv_for_each_fence_unlocked(&cursor, fence) { add_rps_boost_after_vblank(new_plane_state->hw.crtc, fence); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 470fdfd61a0f..14a1c0ad8c3c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -138,12 +138,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * Alternatively, we can trade that extra information on read/write * activity with * args->busy = - * !dma_resv_test_signaled(obj->resv, true); + * !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ); * to report the overall busyness. This is what the wait-ioctl does. * */ args->busy = 0; - dma_resv_iter_begin(&cursor, obj->base.resv, true); + dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) args->busy = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 444f8268b9c5..a200d3e66573 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP - GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) && + GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) && i915_gem_object_evictable(obj)); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 6d1a71d6404c..644fe237601c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, return true; /* we will unbind on next submission, still have userptr pins */ - r = dma_resv_wait_timeout(obj->base.resv, true, false, + r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index dab3d30c09a0..319936f91ac5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -40,7 +40,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, struct dma_fence *fence; long ret = timeout ?: 1; - dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL); + dma_resv_iter_begin(&cursor, resv, + dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) { ret = i915_gem_object_wait_fence(fence, flags, timeout); if (ret <= 0) @@ -117,7 +118,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL); + 
dma_resv_iter_begin(&cursor, obj->base.resv, + dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) i915_gem_fence_wait_priority(fence, attr); dma_resv_iter_end(&cursor); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index b071a58dd6da..b4275b55e5b8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -219,7 +219,8 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, goto out_detach; } - timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ); + timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE, + true, 5 * HZ); if (!timeout) { pr_err("dmabuf wait for exclusive fence timed out.\n"); timeout = -ETIME; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 582770360ad1..73d5195146b0 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1598,7 +1598,8 @@ i915_request_await_object(struct i915_request *to, struct dma_fence *fence; int ret = 0; - dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) { + dma_resv_for_each_fence(&cursor, obj->base.resv, + dma_resv_usage_rw(write), fence) { ret = i915_request_await_dma_fence(to, fence); if (ret) break; diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 2a74a9a1cafe..ae984c66c48a 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -585,7 +585,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, debug_fence_assert(fence); might_sleep_if(gfpflags_allow_blocking(gfp)); - dma_resv_iter_begin(&cursor, resv, write); + dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write)); dma_resv_for_each_fence_unlocked(&cursor, f) { pending = i915_sw_fence_await_dma_fence(fence, f, timeout, gfp); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 02b9ae65a96a..01bbb5f2d462 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -848,7 +848,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); long ret; - ret = dma_resv_wait_timeout(obj->resv, write, true, remain); + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write), + true, remain); if (ret == 0) return remain == 0 ? 
-EBUSY : -ETIMEDOUT; else if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index e2faf92e4831..8642b84ea20c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -558,7 +558,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) asyw->image.handle[0] = ctxdma->object.handle; } - ret = dma_resv_get_singleton(nvbo->bo.base.resv, false, + ret = dma_resv_get_singleton(nvbo->bo.base.resv, + DMA_RESV_USAGE_WRITE, &asyw->state.fence); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 74f8652d2bd3..c6bb4dbcd735 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -962,11 +962,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, struct dma_fence *fence; int ret; - /* TODO: This is actually a memory management dependency */ - ret = dma_resv_get_singleton(bo->base.resv, false, &fence); + ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE, + &fence); if (ret) - dma_resv_wait_timeout(bo->base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE, + false, MAX_SCHEDULE_TIMEOUT); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 0268259e97eb..d5e81ccee01c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -350,14 +350,16 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, if (ret) return ret; - /* Waiting for the exclusive fence first causes performance regressions - * under some circumstances. So manually wait for the shared ones first. + /* Waiting for the writes first causes performance regressions + * under some circumstances. So manually wait for the reads first. */ for (i = 0; i < 2; ++i) { struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { + dma_resv_for_each_fence(&cursor, resv, + dma_resv_usage_rw(exclusive), + fence) { struct nouveau_fence *f; if (i == 0 && dma_resv_iter_is_exclusive(&cursor)) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 9416bee92141..fab542a758ff 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -962,7 +962,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, return -ENOENT; nvbo = nouveau_gem_object(gem); - lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true, + lret = dma_resv_wait_timeout(nvbo->bo.base.resv, + dma_resv_usage_rw(write), true, no_wait ? 0 : 30 * HZ); if (!lret) ret = -EBUSY; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 94b6f0a19c83..7fcbc2a5b6cd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -316,7 +316,8 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, if (!gem_obj) return -ENOENT; - ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout); + ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ, + true, timeout); if (!ret) ret = timeout ? 
-ETIMEDOUT : -EBUSY; diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 6a36b0fd845c..33e5889d6608 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -61,7 +61,8 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) struct dma_fence *fence; int rel = 0; - dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true); + dma_resv_iter_begin(&cursor, bo->tbo.base.resv, + DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) rel = 0; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index f60e826cd292..57ff2b723c87 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -533,7 +533,8 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence); + r = dma_resv_get_singleton(new_rbo->tbo.base.resv, DMA_RESV_USAGE_WRITE, + &work->fence); if (r) { radeon_bo_unreserve(new_rbo); DRM_ERROR("failed to get new rbo buffer fences\n"); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index f563284a7fac..6616a828f40b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -162,7 +162,9 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, } if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ - r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); + r = dma_resv_wait_timeout(robj->tbo.base.resv, + DMA_RESV_USAGE_READ, + true, 30 * HZ); if (!r) r = -EBUSY; @@ -524,7 +526,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, } robj = gem_to_radeon_bo(gobj); - r = dma_resv_test_signaled(robj->tbo.base.resv, true); + r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ); if (r == 0) r = -EBUSY; else @@ -553,7 +555,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gem_to_radeon_bo(gobj); - ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, + true, 30 * HZ); if (ret == 0) r = -EBUSY; else if (ret < 0) diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 9fa88549c89e..68ebeb1bdfff 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, return true; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index b991ba1bcd51..49bbb2266c0f 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev, struct dma_fence *f; int r = 0; - dma_resv_for_each_fence(&cursor, resv, shared, f) { + dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(shared), f) { fence = to_radeon_fence(f); if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c 
index bc0f44299bb9..a50750740ab0 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -478,8 +478,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, + false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) { DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); return r ? r : -ETIME; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index c5660b066554..76fd2904c7c6 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -705,7 +705,8 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, dma_resv_assert_held(obj->resv); - dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { + dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write), + fence) { /* Make sure to grab an additional ref on the added fence */ dma_fence_get(fence); ret = drm_sched_job_add_dependency(job, fence); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c49996cf25d0..cff05b62f3f7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, resv, true); + dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (!fence->ops->signaled) dma_fence_enable_sw_signaling(fence); @@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, struct dma_resv *resv = &bo->base._resv; int ret; - if (dma_resv_test_signaled(resv, true)) + if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ)) ret = 0; else ret = -EBUSY; @@ -264,7 +264,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, dma_resv_unlock(bo->base.resv); spin_unlock(&bo->bdev->lru_lock); - lret = dma_resv_wait_timeout(resv, true, interruptible, + lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, + interruptible, 30 * HZ); if (lret < 0) @@ -367,7 +368,8 @@ static void ttm_bo_release(struct kref *kref) /* Last resort, if we fail to allocate memory for the * fences block for the BO to become idle */ - dma_resv_wait_timeout(bo->base.resv, true, false, + dma_resv_wait_timeout(bo->base.resv, + DMA_RESV_USAGE_READ, false, 30 * HZ); } @@ -378,7 +380,7 @@ static void ttm_bo_release(struct kref *kref) ttm_mem_io_free(bdev, bo->resource); } - if (!dma_resv_test_signaled(bo->base.resv, true) || + if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) || !dma_resv_trylock(bo->base.resv)) { /* The BO is not idle, resurrect it for delayed destroy */ ttm_bo_flush_all_fences(bo); @@ -1044,14 +1046,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, long timeout = 15 * HZ; if (no_wait) { - if (dma_resv_test_signaled(bo->base.resv, true)) + if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) return 0; else return -EBUSY; } - timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible, - timeout); + timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, + interruptible, timeout); if (timeout < 0) return timeout; diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 2ddbebca87d9..91fc4940c65a 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ 
b/drivers/gpu/drm/vgem/vgem_fence.c @@ -130,6 +130,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, struct vgem_file *vfile = file->driver_priv; struct dma_resv *resv; struct drm_gem_object *obj; + enum dma_resv_usage usage; struct dma_fence *fence; int ret; @@ -151,7 +152,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, /* Check for a conflicting fence */ resv = obj->resv; - if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) { + usage = dma_resv_usage_rw(arg->flags & VGEM_FENCE_WRITE); + if (!dma_resv_test_signaled(resv, usage)) { ret = -EBUSY; goto err_fence; } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 77743fd2c61a..f8d83358d2a0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -518,9 +518,10 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, return -ENOENT; if (args->flags & VIRTGPU_WAIT_NOWAIT) { - ret = dma_resv_test_signaled(obj->resv, true); + ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ); } else { - ret = dma_resv_wait_timeout(obj->resv, true, true, timeout); + ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, + true, timeout); } if (ret == 0) ret = -EBUSY; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index fe13aa8b4a64..b96884f7d03d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -528,8 +528,8 @@ static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo, if (flags & drm_vmw_synccpu_allow_cs) { long lret; - lret = dma_resv_wait_timeout(bo->base.resv, true, true, - nonblock ? 0 : + lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, + true, nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); if (!lret) return -EBUSY; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 626067104751..a84d1d5628d0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1164,7 +1164,8 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, if (bo->moving) dma_fence_put(bo->moving); - return dma_resv_get_singleton(bo->base.resv, false, + return dma_resv_get_singleton(bo->base.resv, + DMA_RESV_USAGE_WRITE, &bo->moving); } diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index d32cd7538835..f9901d273b8e 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -67,7 +67,8 @@ wait_fence: * may be not up-to-date. Wait for the exporter to finish * the migration. */ - return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false, + return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, + DMA_RESV_USAGE_WRITE, false, MAX_SCHEDULE_TIMEOUT); } EXPORT_SYMBOL(ib_umem_dmabuf_map_pages); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 6fb91956ab8d..a297397743a2 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -408,6 +408,9 @@ struct dma_buf { * pipelining across drivers. These do not set any fences for their * access. An example here is v4l. * + * - Driver should use dma_resv_usage_rw() when retrieving fences as + * dependency for implicit synchronization. 
+ * * DYNAMIC IMPORTER RULES: * * Dynamic importers, see dma_buf_attachment_is_dynamic(), have @@ -423,8 +426,9 @@ struct dma_buf { * * IMPORTANT: * - * All drivers must obey the struct dma_resv rules, specifically the - * rules for updating and obeying fences. + * All drivers and memory management related functions must obey the + * struct dma_resv rules, specifically the rules for updating and + * obeying fences. See enum dma_resv_usage for further descriptions. */ struct dma_resv *resv; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 5fa04d0fccad..92cd8023980f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -49,6 +49,53 @@ extern struct ww_class reservation_ww_class; struct dma_resv_list; +/** + * enum dma_resv_usage - how the fences from a dma_resv obj are used + * + * This enum describes the different use cases for a dma_resv object and + * controls which fences are returned when queried. + * + * An important fact is that there is the order WRITEobj = obj; - cursor->all_fences = all_fences; + cursor->usage = usage; cursor->fence = NULL; } @@ -241,7 +288,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * dma_resv_for_each_fence - fence iterator * @cursor: a struct dma_resv_iter pointer * @obj: a dma_resv object pointer - * @all_fences: true if all fences should be returned + * @usage: controls which fences to return * @fence: the current fence * * Iterate over the fences in a struct dma_resv object while holding the @@ -250,8 +297,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * valid as long as the lock is held and so no extra reference to the fence is * taken. */ -#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \ - for (dma_resv_iter_begin(cursor, obj, all_fences), \ +#define dma_resv_for_each_fence(cursor, obj, usage, fence) \ + for (dma_resv_iter_begin(cursor, obj, usage), \ fence = dma_resv_iter_first(cursor); fence; \ fence = dma_resv_iter_next(cursor)) @@ -418,14 +465,14 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); -int dma_resv_get_fences(struct dma_resv *obj, bool write, +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences); -int dma_resv_get_singleton(struct dma_resv *obj, bool write, +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, struct dma_fence **fence); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout); -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, + bool intr, unsigned long timeout); +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage); void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); #endif /* _LINUX_RESERVATION_H */ From 73511edf8b196e6f1ccda0fdf294ff57aa2dc9db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 9 Nov 2021 11:08:18 +0100 Subject: [PATCH 006/219] dma-buf: specify usage while adding fences to dma_resv obj v7 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of distingting between shared and 
exclusive fences specify the fence usage while adding fences. Rework all drivers to use this interface instead and deprecate the old one. v2: some kerneldoc comments suggested by Daniel v3: fix a missing case in radeon v4: rebase on nouveau changes, fix lockdep and temporary disable warning v5: more documentation updates v6: separate internal dma_resv changes from this patch, avoids to disable warning temporary, rebase on upstream changes v7: fix missed case in lima driver, minimize changes to i915_gem_busy_ioctl Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-3-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 48 +++++++-- drivers/dma-buf/st-dma-resv.c | 101 +++++------------- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 10 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 6 +- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 5 +- .../drm/i915/gem/selftests/i915_gem_migrate.c | 4 +- .../drm/i915/gem/selftests/i915_gem_mman.c | 3 +- drivers/gpu/drm/i915/i915_vma.c | 8 +- .../drm/i915/selftests/intel_memory_region.c | 3 +- drivers/gpu/drm/lima/lima_gem.c | 7 +- drivers/gpu/drm/msm/msm_gem_submit.c | 6 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 9 +- drivers/gpu/drm/nouveau/nouveau_fence.c | 4 +- drivers/gpu/drm/panfrost/panfrost_job.c | 2 +- drivers/gpu/drm/qxl/qxl_release.c | 3 +- drivers/gpu/drm/radeon/radeon_object.c | 6 +- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 5 +- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 6 +- drivers/gpu/drm/v3d/v3d_gem.c | 4 +- drivers/gpu/drm/vc4/vc4_gem.c | 2 +- drivers/gpu/drm/vgem/vgem_fence.c | 9 +- drivers/gpu/drm/virtio/virtgpu_gem.c | 3 +- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 3 +- include/linux/dma-buf.h | 16 +-- include/linux/dma-resv.h | 25 +++-- 30 files changed, 149 insertions(+), 166 deletions(-) diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 17237e6ee30c..543dae6566d2 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -234,14 +234,14 @@ EXPORT_SYMBOL(dma_resv_reserve_fences); #ifdef CONFIG_DEBUG_MUTEXES /** - * dma_resv_reset_shared_max - reset shared fences for debugging + * dma_resv_reset_max_fences - reset shared fences for debugging * @obj: the dma_resv object to reset * * Reset the number of pre-reserved shared slots to test that drivers do * correct slot allocation using dma_resv_reserve_fences(). See also * &dma_resv_list.shared_max. */ -void dma_resv_reset_shared_max(struct dma_resv *obj) +void dma_resv_reset_max_fences(struct dma_resv *obj) { struct dma_resv_list *fences = dma_resv_shared_list(obj); @@ -251,7 +251,7 @@ void dma_resv_reset_shared_max(struct dma_resv *obj) if (fences) fences->shared_max = fences->shared_count; } -EXPORT_SYMBOL(dma_resv_reset_shared_max); +EXPORT_SYMBOL(dma_resv_reset_max_fences); #endif /** @@ -264,7 +264,8 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max); * * See also &dma_resv.fence for a discussion of the semantics. 
*/ -void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) +static void dma_resv_add_shared_fence(struct dma_resv *obj, + struct dma_fence *fence) { struct dma_resv_list *fobj; struct dma_fence *old; @@ -305,13 +306,13 @@ replace: write_seqcount_end(&obj->seq); dma_fence_put(old); } -EXPORT_SYMBOL(dma_resv_add_shared_fence); /** * dma_resv_replace_fences - replace fences in the dma_resv obj * @obj: the reservation object * @context: the context of the fences to replace * @replacement: the new fence to use instead + * @usage: how the new fence is used, see enum dma_resv_usage * * Replace fences with a specified context with a new fence. Only valid if the * operation represented by the original fence has no longer access to the @@ -321,12 +322,16 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence); * update fence which makes the resource inaccessible. */ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, - struct dma_fence *replacement) + struct dma_fence *replacement, + enum dma_resv_usage usage) { struct dma_resv_list *list; struct dma_fence *old; unsigned int i; + /* Only readers supported for now */ + WARN_ON(usage != DMA_RESV_USAGE_READ); + dma_resv_assert_held(obj); write_seqcount_begin(&obj->seq); @@ -360,7 +365,8 @@ EXPORT_SYMBOL(dma_resv_replace_fences); * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock(). * See also &dma_resv.fence_excl for a discussion of the semantics. */ -void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) +static void dma_resv_add_excl_fence(struct dma_resv *obj, + struct dma_fence *fence) { struct dma_fence *old_fence = dma_resv_excl_fence(obj); @@ -375,7 +381,27 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) dma_fence_put(old_fence); } -EXPORT_SYMBOL(dma_resv_add_excl_fence); + +/** + * dma_resv_add_fence - Add a fence to the dma_resv obj + * @obj: the reservation object + * @fence: the fence to add + * @usage: how the fence is used, see enum dma_resv_usage + * + * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and + * dma_resv_reserve_fences() has been called. + * + * See also &dma_resv.fence for a discussion of the semantics. + */ +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage) +{ + if (usage == DMA_RESV_USAGE_WRITE) + dma_resv_add_excl_fence(obj, fence); + else + dma_resv_add_shared_fence(obj, fence); +} +EXPORT_SYMBOL(dma_resv_add_fence); /* Restart the iterator by initializing all the necessary fields, but not the * relation to the dma_resv object. */ @@ -574,7 +600,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) } dma_fence_get(f); - if (dma_resv_iter_is_exclusive(&cursor)) + if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE) excl = f; else RCU_INIT_POINTER(list->shared[list->shared_count++], f); @@ -771,13 +797,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled); */ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) { + static const char *usage[] = { "write", "read" }; struct dma_resv_iter cursor; struct dma_fence *fence; dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { seq_printf(seq, "\t%s fence:", - dma_resv_iter_is_exclusive(&cursor) ? 
- "Exclusive" : "Shared"); + usage[dma_resv_iter_usage(&cursor)]); dma_fence_describe(fence, seq); } } diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index d097981061b1..d0f7c2bfd4f0 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -58,8 +58,9 @@ static int sanitycheck(void *arg) return r; } -static int test_signaling(void *arg, enum dma_resv_usage usage) +static int test_signaling(void *arg) { + enum dma_resv_usage usage = (unsigned long)arg; struct dma_resv resv; struct dma_fence *f; int r; @@ -81,11 +82,7 @@ static int test_signaling(void *arg, enum dma_resv_usage usage) goto err_unlock; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); - + dma_resv_add_fence(&resv, f, usage); if (dma_resv_test_signaled(&resv, usage)) { pr_err("Resv unexpectedly signaled\n"); r = -EINVAL; @@ -105,18 +102,9 @@ err_free: return r; } -static int test_excl_signaling(void *arg) -{ - return test_signaling(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_signaling(void *arg) -{ - return test_signaling(arg, DMA_RESV_USAGE_READ); -} - -static int test_for_each(void *arg, enum dma_resv_usage usage) +static int test_for_each(void *arg) { + enum dma_resv_usage usage = (unsigned long)arg; struct dma_resv_iter cursor; struct dma_fence *f, *fence; struct dma_resv resv; @@ -139,10 +127,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage) goto err_unlock; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); + dma_resv_add_fence(&resv, f, usage); r = -ENOENT; dma_resv_for_each_fence(&cursor, &resv, usage, fence) { @@ -156,8 +141,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage) r = -EINVAL; goto err_unlock; } - if (dma_resv_iter_is_exclusive(&cursor) != - (usage >= DMA_RESV_USAGE_READ)) { + if (dma_resv_iter_usage(&cursor) != usage) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_unlock; @@ -177,18 +161,9 @@ err_free: return r; } -static int test_excl_for_each(void *arg) -{ - return test_for_each(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_for_each(void *arg) -{ - return test_for_each(arg, DMA_RESV_USAGE_READ); -} - -static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) +static int test_for_each_unlocked(void *arg) { + enum dma_resv_usage usage = (unsigned long)arg; struct dma_resv_iter cursor; struct dma_fence *f, *fence; struct dma_resv resv; @@ -212,10 +187,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) goto err_free; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); + dma_resv_add_fence(&resv, f, usage); dma_resv_unlock(&resv); r = -ENOENT; @@ -235,8 +207,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) r = -EINVAL; goto err_iter_end; } - if (dma_resv_iter_is_exclusive(&cursor) != - (usage >= DMA_RESV_USAGE_READ)) { + if (dma_resv_iter_usage(&cursor) != usage) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_iter_end; @@ -262,18 +233,9 @@ err_free: return r; } -static int test_excl_for_each_unlocked(void *arg) -{ - return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_for_each_unlocked(void *arg) -{ - return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ); -} - -static int test_get_fences(void *arg, enum dma_resv_usage usage) +static int test_get_fences(void *arg) { + 
enum dma_resv_usage usage = (unsigned long)arg; struct dma_fence *f, **fences = NULL; struct dma_resv resv; int r, i; @@ -296,10 +258,7 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage) goto err_resv; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); + dma_resv_add_fence(&resv, f, usage); dma_resv_unlock(&resv); r = dma_resv_get_fences(&resv, usage, &i, &fences); @@ -324,30 +283,24 @@ err_resv: return r; } -static int test_excl_get_fences(void *arg) -{ - return test_get_fences(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_get_fences(void *arg) -{ - return test_get_fences(arg, DMA_RESV_USAGE_READ); -} - int dma_resv(void) { static const struct subtest tests[] = { SUBTEST(sanitycheck), - SUBTEST(test_excl_signaling), - SUBTEST(test_shared_signaling), - SUBTEST(test_excl_for_each), - SUBTEST(test_shared_for_each), - SUBTEST(test_excl_for_each_unlocked), - SUBTEST(test_shared_for_each_unlocked), - SUBTEST(test_excl_get_fences), - SUBTEST(test_shared_get_fences), + SUBTEST(test_signaling), + SUBTEST(test_for_each), + SUBTEST(test_for_each_unlocked), + SUBTEST(test_get_fences), }; + enum dma_resv_usage usage; + int r; spin_lock_init(&fence_lock); - return subtests(tests, NULL); + for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ; + ++usage) { + r = subtests(tests, (void *)(unsigned long)usage); + if (r) + return r; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 98b1736bb221..5031e26e6716 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, */ replacement = dma_fence_get_stub(); dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, - replacement); + replacement, DMA_RESV_USAGE_READ); dma_fence_put(replacement); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 413f32c3fd63..76fd916424d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &bo->tbo; - /* One for TTM and one for the CS job */ - p->uf_entry.tv.num_shared = 2; + /* One for TTM and two for the CS job */ + p->uf_entry.tv.num_shared = 3; drm_gem_object_put(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a7f39f8ab7be..a3cdf8a24377 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1397,10 +1397,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, return; } - if (shared) - dma_resv_add_shared_fence(resv, fence); - else - dma_resv_add_excl_fence(resv, fence); + dma_resv_add_fence(resv, fence, shared ? 
DMA_RESV_USAGE_READ : + DMA_RESV_USAGE_WRITE); } /** diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 53f7c78628a4..98bb5c9239de 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -202,14 +202,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = &submit->bos[i].obj->base; + bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; - if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(obj->resv, - submit->out_fence); - else - dma_resv_add_shared_fence(obj->resv, - submit->out_fence); - + dma_resv_add_fence(obj->resv, submit->out_fence, write ? + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); submit_unlock_object(submit, i); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 14a1c0ad8c3c..ddda468241ef 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -148,11 +148,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, if (dma_resv_iter_is_restarted(&cursor)) args->busy = 0; - if (dma_resv_iter_is_exclusive(&cursor)) - /* Translate the exclusive fence to the READ *and* WRITE engine */ + if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE) + /* Translate the write fences to the READ *and* WRITE engine */ args->busy |= busy_check_writer(fence); else - /* Translate shared fences to READ set of engines */ + /* Translate read fences to READ set of engines */ args->busy |= busy_check_reader(fence); } dma_resv_iter_end(&cursor); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index 1fd0cc9ca213..f5f2b8b115ea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -116,7 +116,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, obj->base.resv, NULL, true, i915_fence_timeout(i915), I915_FENCE_GFP); - dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma); + dma_resv_add_fence(obj->base.resv, &clflush->base.dma, + DMA_RESV_USAGE_WRITE); dma_fence_work_commit(&clflush->base); /* * We must have successfully populated the pages(since we are diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 432ac74ff225..438b8a95b3d1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -637,9 +637,8 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, if (IS_ERR_OR_NULL(copy_fence)) return PTR_ERR_OR_ZERO(copy_fence); - dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence); - dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); - + dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE); + dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ); dma_fence_put(copy_fence); return 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index 0e52eb87cd55..4997ed18b6e4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -218,8 +218,8 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt, if (rq) { err = dma_resv_reserve_fences(obj->base.resv, 1); if (!err) - dma_resv_add_excl_fence(obj->base.resv, - &rq->fence); + 
dma_resv_add_fence(obj->base.resv, &rq->fence, + DMA_RESV_USAGE_WRITE); i915_gem_object_set_moving_fence(obj, &rq->fence); i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index a132e241c3ee..3a6e3f6d239f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1220,7 +1220,8 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements, expand32(POISON_INUSE), &rq); i915_gem_object_unpin_pages(obj); if (rq) { - dma_resv_add_excl_fence(obj->base.resv, &rq->fence); + dma_resv_add_fence(obj->base.resv, &rq->fence, + DMA_RESV_USAGE_WRITE); i915_gem_object_set_moving_fence(obj, &rq->fence); i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index bae3423f58e8..524477d8939e 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1826,7 +1826,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } if (fence) { - dma_resv_add_excl_fence(vma->obj->base.resv, fence); + dma_resv_add_fence(vma->obj->base.resv, fence, + DMA_RESV_USAGE_WRITE); obj->write_domain = I915_GEM_DOMAIN_RENDER; obj->read_domains = 0; } @@ -1838,7 +1839,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } if (fence) { - dma_resv_add_shared_fence(vma->obj->base.resv, fence); + dma_resv_add_fence(vma->obj->base.resv, fence, + DMA_RESV_USAGE_READ); obj->write_domain = 0; } } @@ -2078,7 +2080,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm) goto out_rpm; } - dma_resv_add_shared_fence(obj->base.resv, fence); + dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ); dma_fence_put(fence); out_rpm: diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 6114e013092b..73eb53edb8de 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -1056,7 +1056,8 @@ static int igt_lmem_write_cpu(void *arg) obj->mm.pages->sgl, I915_CACHE_NONE, true, 0xdeadbeaf, &rq); if (rq) { - dma_resv_add_excl_fence(obj->base.resv, &rq->fence); + dma_resv_add_fence(obj->base.resv, &rq->fence, + DMA_RESV_USAGE_WRITE); i915_request_put(rq); } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index e0a11ee0e86d..0f1ca0b0db49 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -364,10 +364,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) fence = lima_sched_context_queue_task(submit->task); for (i = 0; i < submit->nr_bos; i++) { - if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence); - else - dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence); + dma_resv_add_fence(lima_bo_resv(bos[i]), fence, + submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ? 
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); } drm_gem_unlock_reservations((struct drm_gem_object **)bos, diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3164db8be893..8d1eef914ba8 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -395,9 +395,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) struct drm_gem_object *obj = &submit->bos[i].obj->base; if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(obj->resv, submit->user_fence); + dma_resv_add_fence(obj->resv, submit->user_fence, + DMA_RESV_USAGE_WRITE); else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) - dma_resv_add_shared_fence(obj->resv, submit->user_fence); + dma_resv_add_fence(obj->resv, submit->user_fence, + DMA_RESV_USAGE_READ); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c6bb4dbcd735..05076e530e7d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1308,10 +1308,11 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl { struct dma_resv *resv = nvbo->bo.base.resv; - if (exclusive) - dma_resv_add_excl_fence(resv, &fence->base); - else if (fence) - dma_resv_add_shared_fence(resv, &fence->base); + if (!fence) + return; + + dma_resv_add_fence(resv, &fence->base, exclusive ? + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index d5e81ccee01c..7f01dcf81fab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -360,9 +360,11 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(exclusive), fence) { + enum dma_resv_usage usage; struct nouveau_fence *f; - if (i == 0 && dma_resv_iter_is_exclusive(&cursor)) + usage = dma_resv_iter_usage(&cursor); + if (i == 0 && usage == DMA_RESV_USAGE_WRITE) continue; f = nouveau_local_fence(fence, chan->drm); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index c34114560e49..fda5871aebe3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -268,7 +268,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos, int i; for (i = 0; i < bo_count; i++) - dma_resv_add_excl_fence(bos[i]->resv, fence); + dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE); } int panfrost_job_push(struct panfrost_job *job) diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index cde1e8ddaeaa..368d26da0d6a 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -429,7 +429,8 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) list_for_each_entry(entry, &release->bos, head) { bo = entry->bo; - dma_resv_add_shared_fence(bo->base.resv, &release->base); + dma_resv_add_fence(bo->base.resv, &release->base, + DMA_RESV_USAGE_READ); ttm_bo_move_to_lru_tail_unlocked(bo); dma_resv_unlock(bo->base.resv); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 7ffd2e90f325..cb5c4aa45cef 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -791,8 +791,6 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, return; } - if (shared) - 
dma_resv_add_shared_fence(resv, &fence->base); - else - dma_resv_add_excl_fence(resv, &fence->base); + dma_resv_add_fence(resv, &fence->base, shared ? + DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE); } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cff05b62f3f7..d74f9eea855e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -739,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, return ret; } - dma_resv_add_shared_fence(bo->base.resv, fence); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); ret = dma_resv_reserve_fences(bo->base.resv, 1); if (unlikely(ret)) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 1b96b91bf81b..7a96a1db13a7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -507,7 +507,8 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, if (ret) return ret; - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); + dma_resv_add_fence(&ghost_obj->base._resv, fence, + DMA_RESV_USAGE_WRITE); /** * If we're not moving to fixed memory, the TTM object @@ -561,7 +562,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); int ret = 0; - dma_resv_add_excl_fence(bo->base.resv, fence); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); if (!evict) ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); else if (!from->use_tt && pipeline) diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 789c645f004e..0eb995d25df1 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -154,10 +154,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - if (entry->num_shared) - dma_resv_add_shared_fence(bo->base.resv, fence); - else - dma_resv_add_excl_fence(bo->base.resv, fence); + dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ? + DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE); ttm_bo_move_to_lru_tail_unlocked(bo); dma_resv_unlock(bo->base.resv); } diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 961812d33827..2352e9640922 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -550,8 +550,8 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, for (i = 0; i < job->bo_count; i++) { /* XXX: Use shared fences for read-only objects. 
*/ - dma_resv_add_excl_fence(job->bo[i]->resv, - job->done_fence); + dma_resv_add_fence(job->bo[i]->resv, job->done_fence, + DMA_RESV_USAGE_WRITE); } drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 594bd6bb00d2..38550317e025 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -546,7 +546,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) bo = to_vc4_bo(&exec->bo[i]->base); bo->seqno = seqno; - dma_resv_add_shared_fence(bo->base.base.resv, exec->fence); + dma_resv_add_fence(bo->base.base.resv, exec->fence); } list_for_each_entry(bo, &exec->unref_list, unref_head) { diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 91fc4940c65a..c2a879734d40 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -161,12 +161,9 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, /* Expose the fence via the dma-buf */ dma_resv_lock(resv, NULL); ret = dma_resv_reserve_fences(resv, 1); - if (!ret) { - if (arg->flags & VGEM_FENCE_WRITE) - dma_resv_add_excl_fence(resv, fence); - else - dma_resv_add_shared_fence(resv, fence); - } + if (!ret) + dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ? + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); dma_resv_unlock(resv); /* Record the fence in our idr for later signaling */ diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 1820ca6cf673..580a78809836 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -250,7 +250,8 @@ void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, int i; for (i = 0; i < objs->nents; i++) - dma_resv_add_excl_fence(objs->objs[i]->resv, fence); + dma_resv_add_fence(objs->objs[i]->resv, fence, + DMA_RESV_USAGE_WRITE); } void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index b96884f7d03d..bec50223efe5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -758,7 +758,8 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo, ret = dma_resv_reserve_fences(bo->base.resv, 1); if (!ret) - dma_resv_add_excl_fence(bo->base.resv, &fence->base); + dma_resv_add_fence(bo->base.resv, &fence->base, + DMA_RESV_USAGE_WRITE); else /* Last resort fallback when we are OOM */ dma_fence_wait(&fence->base, false); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index a297397743a2..71731796c8c3 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -393,15 +393,15 @@ struct dma_buf { * e.g. exposed in `Implicit Fence Poll Support`_ must follow the * below rules. * - * - Drivers must add a shared fence through dma_resv_add_shared_fence() - * for anything the userspace API considers a read access. This highly - * depends upon the API and window system. + * - Drivers must add a read fence through dma_resv_add_fence() with the + * DMA_RESV_USAGE_READ flag for anything the userspace API considers a + * read access. This highly depends upon the API and window system. * - * - Similarly drivers must set the exclusive fence through - * dma_resv_add_excl_fence() for anything the userspace API considers - * write access. 
+ * - Similarly drivers must add a write fence through + * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for + * anything the userspace API considers write access. * - * - Drivers may just always set the exclusive fence, since that only + * - Drivers may just always add a write fence, since that only * causes unecessarily synchronization, but no correctness issues. * * - Some drivers only expose a synchronous userspace API with no @@ -416,7 +416,7 @@ struct dma_buf { * Dynamic importers, see dma_buf_attachment_is_dynamic(), have * additional constraints on how they set up fences: * - * - Dynamic importers must obey the exclusive fence and wait for it to + * - Dynamic importers must obey the write fences and wait for them to * signal before allowing access to the buffer's underlying storage * through the device. * diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 92cd8023980f..98dc5234b487 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -195,6 +195,9 @@ struct dma_resv_iter { /** @fence: the currently handled fence */ struct dma_fence *fence; + /** @fence_usage: the usage of the current fence */ + enum dma_resv_usage fence_usage; + /** @seq: sequence number to check for modifications */ unsigned int seq; @@ -244,14 +247,15 @@ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor) } /** - * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one + * dma_resv_iter_usage - Return the usage of the current fence * @cursor: the cursor of the current position * - * Returns true if the currently returned fence is the exclusive one. + * Returns the usage of the currently processed fence. */ -static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor) +static inline enum dma_resv_usage +dma_resv_iter_usage(struct dma_resv_iter *cursor) { - return cursor->index == 0; + return cursor->fence_usage; } /** @@ -306,9 +310,9 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) #ifdef CONFIG_DEBUG_MUTEXES -void dma_resv_reset_shared_max(struct dma_resv *obj); +void dma_resv_reset_max_fences(struct dma_resv *obj); #else -static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} +static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {} #endif /** @@ -454,17 +458,18 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) */ static inline void dma_resv_unlock(struct dma_resv *obj) { - dma_resv_reset_shared_max(obj); + dma_resv_reset_max_fences(obj); ww_mutex_unlock(&obj->lock); } void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences); -void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage); void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, - struct dma_fence *fence); -void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); + struct dma_fence *fence, + enum dma_resv_usage usage); int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences); int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, From 047a1b877ed48098bed71fcfb1d4891e1b54441d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 
23 Nov 2021 09:33:07 +0100 Subject: [PATCH 007/219] dma-buf & drm/amdgpu: remove dma_resv workaround MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rework the internals of the dma_resv object to allow adding more than one write fence and remember for each fence what purpose it had. This allows removing the workaround from amdgpu which used a container for this instead. Signed-off-by: Christian König Reviewed-by: Daniel Vetter Cc: amd-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-4-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 353 ++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 53 +-- include/linux/dma-resv.h | 47 +-- 4 files changed, 157 insertions(+), 297 deletions(-) diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 543dae6566d2..378d47e1cfea 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -44,12 +44,12 @@ /** * DOC: Reservation Object Overview * - * The reservation object provides a mechanism to manage shared and - * exclusive fences associated with a buffer. A reservation object - * can have attached one exclusive fence (normally associated with - * write operations) or N shared fences (read operations). The RCU - * mechanism is used to protect read access to fences from locked - * write-side updates. + * The reservation object provides a mechanism to manage a container of + * dma_fence object associated with a resource. A reservation object + * can have any number of fences attaches to it. Each fence carries an usage + * parameter determining how the operation represented by the fence is using the + * resource. The RCU mechanism is used to protect read access to fences from + * locked write-side updates. * * See struct dma_resv for more details. */ @@ -57,39 +57,59 @@ DEFINE_WD_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); +/* Mask for the lower fence pointer bits */ +#define DMA_RESV_LIST_MASK 0x3 + struct dma_resv_list { struct rcu_head rcu; - u32 shared_count, shared_max; - struct dma_fence __rcu *shared[]; + u32 num_fences, max_fences; + struct dma_fence __rcu *table[]; }; -/** - * dma_resv_list_alloc - allocate fence list - * @shared_max: number of fences we need space for - * +/* Extract the fence and usage flags from an RCU protected entry in the list. */ +static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index, + struct dma_resv *resv, struct dma_fence **fence, + enum dma_resv_usage *usage) +{ + long tmp; + + tmp = (long)rcu_dereference_check(list->table[index], + resv ? dma_resv_held(resv) : true); + *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK); + if (usage) + *usage = tmp & DMA_RESV_LIST_MASK; +} + +/* Set the fence and usage flags at the specific index in the list. */ +static void dma_resv_list_set(struct dma_resv_list *list, + unsigned int index, + struct dma_fence *fence, + enum dma_resv_usage usage) +{ + long tmp = ((long)fence) | usage; + + RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp); +} + +/* * Allocate a new dma_resv_list and make sure to correctly initialize - * shared_max. + * max_fences. 
*/ -static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) +static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences) { struct dma_resv_list *list; - list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL); + list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL); if (!list) return NULL; - list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) / - sizeof(*list->shared); + list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) / + sizeof(*list->table); return list; } -/** - * dma_resv_list_free - free fence list - * @list: list to free - * - * Free a dma_resv_list and make sure to drop all references. - */ +/* Free a dma_resv_list and make sure to drop all references. */ static void dma_resv_list_free(struct dma_resv_list *list) { unsigned int i; @@ -97,9 +117,12 @@ static void dma_resv_list_free(struct dma_resv_list *list) if (!list) return; - for (i = 0; i < list->shared_count; ++i) - dma_fence_put(rcu_dereference_protected(list->shared[i], true)); + for (i = 0; i < list->num_fences; ++i) { + struct dma_fence *fence; + dma_resv_list_entry(list, i, NULL, &fence, NULL); + dma_fence_put(fence); + } kfree_rcu(list, rcu); } @@ -112,8 +135,7 @@ void dma_resv_init(struct dma_resv *obj) ww_mutex_init(&obj->lock, &reservation_ww_class); seqcount_ww_mutex_init(&obj->seq, &obj->lock); - RCU_INIT_POINTER(obj->fence, NULL); - RCU_INIT_POINTER(obj->fence_excl, NULL); + RCU_INIT_POINTER(obj->fences, NULL); } EXPORT_SYMBOL(dma_resv_init); @@ -123,46 +145,32 @@ EXPORT_SYMBOL(dma_resv_init); */ void dma_resv_fini(struct dma_resv *obj) { - struct dma_resv_list *fobj; - struct dma_fence *excl; - /* * This object should be dead and all references must have * been released to it, so no need to be protected with rcu. */ - excl = rcu_dereference_protected(obj->fence_excl, 1); - if (excl) - dma_fence_put(excl); - - fobj = rcu_dereference_protected(obj->fence, 1); - dma_resv_list_free(fobj); + dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); ww_mutex_destroy(&obj->lock); } EXPORT_SYMBOL(dma_resv_fini); -static inline struct dma_fence * -dma_resv_excl_fence(struct dma_resv *obj) +/* Dereference the fences while ensuring RCU rules */ +static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj) { - return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); -} - -static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) -{ - return rcu_dereference_check(obj->fence, dma_resv_held(obj)); + return rcu_dereference_check(obj->fences, dma_resv_held(obj)); } /** - * dma_resv_reserve_fences - Reserve space to add shared fences to - * a dma_resv. + * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object. * @obj: reservation object * @num_fences: number of fences we want to add * - * Should be called before dma_resv_add_shared_fence(). Must - * be called with @obj locked through dma_resv_lock(). + * Should be called before dma_resv_add_fence(). Must be called with @obj + * locked through dma_resv_lock(). * * Note that the preallocated slots need to be re-reserved if @obj is unlocked - * at any time before calling dma_resv_add_shared_fence(). This is validated - * when CONFIG_DEBUG_MUTEXES is enabled. + * at any time before calling dma_resv_add_fence(). This is validated when + * CONFIG_DEBUG_MUTEXES is enabled. 
* * RETURNS * Zero for success, or -errno @@ -174,11 +182,11 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) dma_resv_assert_held(obj); - old = dma_resv_shared_list(obj); - if (old && old->shared_max) { - if ((old->shared_count + num_fences) <= old->shared_max) + old = dma_resv_fences_list(obj); + if (old && old->max_fences) { + if ((old->num_fences + num_fences) <= old->max_fences) return 0; - max = max(old->shared_count + num_fences, old->shared_max * 2); + max = max(old->num_fences + num_fences, old->max_fences * 2); } else { max = max(4ul, roundup_pow_of_two(num_fences)); } @@ -193,27 +201,27 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) * references from the old struct are carried over to * the new. */ - for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) { + for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) { + enum dma_resv_usage usage; struct dma_fence *fence; - fence = rcu_dereference_protected(old->shared[i], - dma_resv_held(obj)); + dma_resv_list_entry(old, i, obj, &fence, &usage); if (dma_fence_is_signaled(fence)) - RCU_INIT_POINTER(new->shared[--k], fence); + RCU_INIT_POINTER(new->table[--k], fence); else - RCU_INIT_POINTER(new->shared[j++], fence); + dma_resv_list_set(new, j++, fence, usage); } - new->shared_count = j; + new->num_fences = j; /* * We are not changing the effective set of fences here so can * merely update the pointer to the new array; both existing * readers and new readers will see exactly the same set of - * active (unsignaled) shared fences. Individual fences and the + * active (unsignaled) fences. Individual fences and the * old array are protected by RCU and so will not vanish under * the gaze of the rcu_read_lock() readers. */ - rcu_assign_pointer(obj->fence, new); + rcu_assign_pointer(obj->fences, new); if (!old) return 0; @@ -222,7 +230,7 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) for (i = k; i < max; ++i) { struct dma_fence *fence; - fence = rcu_dereference_protected(new->shared[i], + fence = rcu_dereference_protected(new->table[i], dma_resv_held(obj)); dma_fence_put(fence); } @@ -234,38 +242,39 @@ EXPORT_SYMBOL(dma_resv_reserve_fences); #ifdef CONFIG_DEBUG_MUTEXES /** - * dma_resv_reset_max_fences - reset shared fences for debugging + * dma_resv_reset_max_fences - reset fences for debugging * @obj: the dma_resv object to reset * - * Reset the number of pre-reserved shared slots to test that drivers do + * Reset the number of pre-reserved fence slots to test that drivers do * correct slot allocation using dma_resv_reserve_fences(). See also - * &dma_resv_list.shared_max. + * &dma_resv_list.max_fences. 
*/ void dma_resv_reset_max_fences(struct dma_resv *obj) { - struct dma_resv_list *fences = dma_resv_shared_list(obj); + struct dma_resv_list *fences = dma_resv_fences_list(obj); dma_resv_assert_held(obj); - /* Test shared fence slot reservation */ + /* Test fence slot reservation */ if (fences) - fences->shared_max = fences->shared_count; + fences->max_fences = fences->num_fences; } EXPORT_SYMBOL(dma_resv_reset_max_fences); #endif /** - * dma_resv_add_shared_fence - Add a fence to a shared slot + * dma_resv_add_fence - Add a fence to the dma_resv obj * @obj: the reservation object - * @fence: the shared fence to add + * @fence: the fence to add + * @usage: how the fence is used, see enum dma_resv_usage * - * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and + * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and * dma_resv_reserve_fences() has been called. * * See also &dma_resv.fence for a discussion of the semantics. */ -static void dma_resv_add_shared_fence(struct dma_resv *obj, - struct dma_fence *fence) +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage) { struct dma_resv_list *fobj; struct dma_fence *old; @@ -280,32 +289,33 @@ static void dma_resv_add_shared_fence(struct dma_resv *obj, */ WARN_ON(dma_fence_is_container(fence)); - fobj = dma_resv_shared_list(obj); - count = fobj->shared_count; + fobj = dma_resv_fences_list(obj); + count = fobj->num_fences; write_seqcount_begin(&obj->seq); for (i = 0; i < count; ++i) { + enum dma_resv_usage old_usage; - old = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(obj)); - if (old->context == fence->context || + dma_resv_list_entry(fobj, i, obj, &old, &old_usage); + if ((old->context == fence->context && old_usage >= usage) || dma_fence_is_signaled(old)) goto replace; } - BUG_ON(fobj->shared_count >= fobj->shared_max); + BUG_ON(fobj->num_fences >= fobj->max_fences); old = NULL; count++; replace: - RCU_INIT_POINTER(fobj->shared[i], fence); - /* pointer update must be visible before we extend the shared_count */ - smp_store_mb(fobj->shared_count, count); + dma_resv_list_set(fobj, i, fence, usage); + /* pointer update must be visible before we extend the num_fences */ + smp_store_mb(fobj->num_fences, count); write_seqcount_end(&obj->seq); dma_fence_put(old); } +EXPORT_SYMBOL(dma_resv_add_fence); /** * dma_resv_replace_fences - replace fences in the dma_resv obj @@ -326,128 +336,63 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, enum dma_resv_usage usage) { struct dma_resv_list *list; - struct dma_fence *old; unsigned int i; - /* Only readers supported for now */ - WARN_ON(usage != DMA_RESV_USAGE_READ); - dma_resv_assert_held(obj); + list = dma_resv_fences_list(obj); write_seqcount_begin(&obj->seq); + for (i = 0; list && i < list->num_fences; ++i) { + struct dma_fence *old; - old = dma_resv_excl_fence(obj); - if (old->context == context) { - RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement)); - dma_fence_put(old); - } - - list = dma_resv_shared_list(obj); - for (i = 0; list && i < list->shared_count; ++i) { - old = rcu_dereference_protected(list->shared[i], - dma_resv_held(obj)); + dma_resv_list_entry(list, i, obj, &old, NULL); if (old->context != context) continue; - rcu_assign_pointer(list->shared[i], dma_fence_get(replacement)); + dma_resv_list_set(list, i, replacement, usage); dma_fence_put(old); } - write_seqcount_end(&obj->seq); } EXPORT_SYMBOL(dma_resv_replace_fences); -/** - * dma_resv_add_excl_fence - 
Add an exclusive fence. - * @obj: the reservation object - * @fence: the exclusive fence to add - * - * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock(). - * See also &dma_resv.fence_excl for a discussion of the semantics. - */ -static void dma_resv_add_excl_fence(struct dma_resv *obj, - struct dma_fence *fence) -{ - struct dma_fence *old_fence = dma_resv_excl_fence(obj); - - dma_resv_assert_held(obj); - - dma_fence_get(fence); - - write_seqcount_begin(&obj->seq); - /* write_seqcount_begin provides the necessary memory barrier */ - RCU_INIT_POINTER(obj->fence_excl, fence); - write_seqcount_end(&obj->seq); - - dma_fence_put(old_fence); -} - -/** - * dma_resv_add_fence - Add a fence to the dma_resv obj - * @obj: the reservation object - * @fence: the fence to add - * @usage: how the fence is used, see enum dma_resv_usage - * - * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and - * dma_resv_reserve_fences() has been called. - * - * See also &dma_resv.fence for a discussion of the semantics. - */ -void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, - enum dma_resv_usage usage) -{ - if (usage == DMA_RESV_USAGE_WRITE) - dma_resv_add_excl_fence(obj, fence); - else - dma_resv_add_shared_fence(obj, fence); -} -EXPORT_SYMBOL(dma_resv_add_fence); - -/* Restart the iterator by initializing all the necessary fields, but not the - * relation to the dma_resv object. */ +/* Restart the unlocked iteration by initializing the cursor object. */ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { cursor->seq = read_seqcount_begin(&cursor->obj->seq); - cursor->index = -1; - cursor->shared_count = 0; - if (cursor->usage >= DMA_RESV_USAGE_READ) { - cursor->fences = dma_resv_shared_list(cursor->obj); - if (cursor->fences) - cursor->shared_count = cursor->fences->shared_count; - } else { - cursor->fences = NULL; - } + cursor->index = 0; + cursor->num_fences = 0; + cursor->fences = dma_resv_fences_list(cursor->obj); + if (cursor->fences) + cursor->num_fences = cursor->fences->num_fences; cursor->is_restarted = true; } /* Walk to the next not signaled fence and grab a reference to it */ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) { - struct dma_resv *obj = cursor->obj; + if (!cursor->fences) + return; do { /* Drop the reference from the previous round */ dma_fence_put(cursor->fence); - if (cursor->index == -1) { - cursor->fence = dma_resv_excl_fence(obj); - cursor->index++; - if (!cursor->fence) - continue; - - } else if (!cursor->fences || - cursor->index >= cursor->shared_count) { + if (cursor->index >= cursor->num_fences) { cursor->fence = NULL; break; - } else { - struct dma_resv_list *fences = cursor->fences; - unsigned int idx = cursor->index++; - - cursor->fence = rcu_dereference(fences->shared[idx]); } + + dma_resv_list_entry(cursor->fences, cursor->index++, + cursor->obj, &cursor->fence, + &cursor->fence_usage); cursor->fence = dma_fence_get_rcu(cursor->fence); - if (!cursor->fence || !dma_fence_is_signaled(cursor->fence)) + if (!cursor->fence) + break; + + if (!dma_fence_is_signaled(cursor->fence) && + cursor->usage >= cursor->fence_usage) break; } while (true); } @@ -522,15 +467,9 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor) dma_resv_assert_held(cursor->obj); cursor->index = 0; - if (cursor->usage >= DMA_RESV_USAGE_READ) - cursor->fences = dma_resv_shared_list(cursor->obj); - else - cursor->fences = NULL; - - fence = dma_resv_excl_fence(cursor->obj); - if (!fence) - 
fence = dma_resv_iter_next(cursor); + cursor->fences = dma_resv_fences_list(cursor->obj); + fence = dma_resv_iter_next(cursor); cursor->is_restarted = true; return fence; } @@ -545,17 +484,22 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_first); */ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor) { - unsigned int idx; + struct dma_fence *fence; dma_resv_assert_held(cursor->obj); cursor->is_restarted = false; - if (!cursor->fences || cursor->index >= cursor->fences->shared_count) - return NULL; - idx = cursor->index++; - return rcu_dereference_protected(cursor->fences->shared[idx], - dma_resv_held(cursor->obj)); + do { + if (!cursor->fences || + cursor->index >= cursor->fences->num_fences) + return NULL; + + dma_resv_list_entry(cursor->fences, cursor->index++, + cursor->obj, &fence, &cursor->fence_usage); + } while (cursor->fence_usage > cursor->usage); + + return fence; } EXPORT_SYMBOL_GPL(dma_resv_iter_next); @@ -570,57 +514,43 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) { struct dma_resv_iter cursor; struct dma_resv_list *list; - struct dma_fence *f, *excl; + struct dma_fence *f; dma_resv_assert_held(dst); list = NULL; - excl = NULL; dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, f) { if (dma_resv_iter_is_restarted(&cursor)) { dma_resv_list_free(list); - dma_fence_put(excl); - if (cursor.shared_count) { - list = dma_resv_list_alloc(cursor.shared_count); - if (!list) { - dma_resv_iter_end(&cursor); - return -ENOMEM; - } - - list->shared_count = 0; - - } else { - list = NULL; + list = dma_resv_list_alloc(cursor.num_fences); + if (!list) { + dma_resv_iter_end(&cursor); + return -ENOMEM; } - excl = NULL; + list->num_fences = 0; } dma_fence_get(f); - if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE) - excl = f; - else - RCU_INIT_POINTER(list->shared[list->shared_count++], f); + dma_resv_list_set(list, list->num_fences++, f, + dma_resv_iter_usage(&cursor)); } dma_resv_iter_end(&cursor); write_seqcount_begin(&dst->seq); - excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst)); - list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst)); + list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst)); write_seqcount_end(&dst->seq); dma_resv_list_free(list); - dma_fence_put(excl); - return 0; } EXPORT_SYMBOL(dma_resv_copy_fences); /** - * dma_resv_get_fences - Get an object's shared and exclusive + * dma_resv_get_fences - Get an object's fences * fences without update side lock held * @obj: the reservation object * @usage: controls which fences to include, see enum dma_resv_usage. @@ -649,7 +579,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, while (*num_fences) dma_fence_put((*fences)[--(*num_fences)]); - count = cursor.shared_count + 1; + count = cursor.num_fences + 1; /* Eventually re-allocate the array */ *fences = krealloc_array(*fences, count, @@ -723,8 +653,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, EXPORT_SYMBOL_GPL(dma_resv_get_singleton); /** - * dma_resv_wait_timeout - Wait on reservation's objects - * shared and/or exclusive fences. + * dma_resv_wait_timeout - Wait on reservation's objects fences * @obj: the reservation object * @usage: controls which fences to include, see enum dma_resv_usage. 
* @intr: if true, do interruptible wait diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 044b41f0bfd9..529d52a204cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -34,7 +34,6 @@ struct amdgpu_fpriv; struct amdgpu_bo_list_entry { struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; - struct dma_fence_chain *chain; uint32_t priority; struct page **user_pages; bool user_invalidated; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 76fd916424d6..8de283997769 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -574,14 +574,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); e->bo_va = amdgpu_vm_bo_find(vm, bo); - - if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) { - e->chain = dma_fence_chain_alloc(); - if (!e->chain) { - r = -ENOMEM; - goto error_validate; - } - } } /* Move fence waiting after getting reservation lock of @@ -642,13 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) { - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - dma_fence_chain_free(e->chain); - e->chain = NULL; - } + if (r) ttm_eu_backoff_reservation(&p->ticket, &p->validated); - } out: return r; } @@ -688,17 +675,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (error && backoff) { - struct amdgpu_bo_list_entry *e; - - amdgpu_bo_list_for_each_entry(e, parser->bo_list) { - dma_fence_chain_free(e->chain); - e->chain = NULL; - } - + if (error && backoff) ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); - } for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); @@ -1272,31 +1251,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct dma_resv *resv = e->tv.bo->base.resv; - struct dma_fence_chain *chain = e->chain; - struct dma_resv_iter cursor; - struct dma_fence *fence; - - if (!chain) - continue; - - /* - * Temporary workaround dma_resv shortcommings by wrapping up - * the submission in a dma_fence_chain and add it as exclusive - * fence. - * - * TODO: Remove together with dma_resv rework. - */ - dma_resv_for_each_fence(&cursor, resv, - DMA_RESV_USAGE_WRITE, - fence) { - break; - } - dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1); - rcu_assign_pointer(resv->fence_excl, &chain->base); - e->chain = NULL; - } + /* Make sure all BOs are remembered as writers */ + amdgpu_bo_list_for_each_entry(e, p->bo_list) + e->tv.num_shared = 0; ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 98dc5234b487..7bb7e7edbb6f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -99,8 +99,8 @@ static inline enum dma_resv_usage dma_resv_usage_rw(bool write) /** * struct dma_resv - a reservation object manages fences for a buffer * - * There are multiple uses for this, with sometimes slightly different rules in - * how the fence slots are used. + * This is a container for dma_fence objects which needs to handle multiple use + * cases. 
* * One use is to synchronize cross-driver access to a struct dma_buf, either for * dynamic buffer management or just to handle implicit synchronization between @@ -130,47 +130,22 @@ struct dma_resv { * @seq: * * Sequence count for managing RCU read-side synchronization, allows - * read-only access to @fence_excl and @fence while ensuring we take a - * consistent snapshot. + * read-only access to @fences while ensuring we take a consistent + * snapshot. */ seqcount_ww_mutex_t seq; /** - * @fence_excl: + * @fences: * - * The exclusive fence, if there is one currently. + * Array of fences which where added to the dma_resv object * - * To guarantee that no fences are lost, this new fence must signal - * only after the previous exclusive fence has signalled. If - * semantically only a new access is added without actually treating the - * previous one as a dependency the exclusive fences can be strung - * together using struct dma_fence_chain. - * - * Note that actual semantics of what an exclusive or shared fence mean - * is defined by the user, for reservation objects shared across drivers - * see &dma_buf.resv. - */ - struct dma_fence __rcu *fence_excl; - - /** - * @fence: - * - * List of current shared fences. - * - * There are no ordering constraints of shared fences against the - * exclusive fence slot. If a waiter needs to wait for all access, it - * has to wait for both sets of fences to signal. - * - * A new fence is added by calling dma_resv_add_shared_fence(). Since - * this often needs to be done past the point of no return in command + * A new fence is added by calling dma_resv_add_fence(). Since this + * often needs to be done past the point of no return in command * submission it cannot fail, and therefore sufficient slots need to be * reserved by calling dma_resv_reserve_fences(). - * - * Note that actual semantics of what an exclusive or shared fence mean - * is defined by the user, for reservation objects shared across drivers - * see &dma_buf.resv. */ - struct dma_resv_list __rcu *fence; + struct dma_resv_list __rcu *fences; }; /** @@ -207,8 +182,8 @@ struct dma_resv_iter { /** @fences: the shared fences; private, *MUST* not dereference */ struct dma_resv_list *fences; - /** @shared_count: number of shared fences */ - unsigned int shared_count; + /** @num_fences: number of fences */ + unsigned int num_fences; /** @is_restarted: true if this is the first returned fence */ bool is_restarted; From b29895e18304feb7e8afc6388db7ece60327b23c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 26 Nov 2021 14:12:42 +0100 Subject: [PATCH 008/219] dma-buf: add DMA_RESV_USAGE_KERNEL v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an usage for kernel submissions. Waiting for those are mandatory for dynamic DMA-bufs. As a precaution this patch also changes all occurrences where fences are added as part of memory management in TTM, VMWGFX and i915 to use the new value because it now becomes possible for drivers to ignore fences with the WRITE usage. v2: use "must" in documentation, fix whitespaces v3: separate out some driver changes and better document why some changes should still be part of this patch. 
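For illustration only (a sketch, not part of the patch): a TTM-style driver path that performs a buffer move could tag its fence as below. The names bo and move_fence are placeholders, and the reservation object is assumed to already be held via dma_resv_lock().

	/* Sketch: kernel-internal work such as moves or clears is added with
	 * DMA_RESV_USAGE_KERNEL, so importers that skip WRITE/READ fences
	 * still have to wait for it. Slot reservation may fail, adding the
	 * fence afterwards cannot. */
	int ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		return ret;
	dma_resv_add_fence(bo->base.resv, move_fence, DMA_RESV_USAGE_KERNEL);
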
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-5-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 2 +- drivers/dma-buf/st-dma-resv.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 2 +- include/linux/dma-resv.h | 24 ++++++++++++++++++--- 7 files changed, 28 insertions(+), 10 deletions(-) diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 378d47e1cfea..f4860e5f2d8b 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled); */ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) { - static const char *usage[] = { "write", "read" }; + static const char *usage[] = { "kernel", "write", "read" }; struct dma_resv_iter cursor; struct dma_fence *fence; diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index d0f7c2bfd4f0..062b57d63fa6 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -296,7 +296,7 @@ int dma_resv(void) int r; spin_lock_init(&fence_lock); - for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ; + for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ; ++usage) { r = subtests(tests, (void *)(unsigned long)usage); if (r) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index f5f2b8b115ea..0512afdd20d8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -117,7 +117,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, i915_fence_timeout(i915), I915_FENCE_GFP); dma_resv_add_fence(obj->base.resv, &clflush->base.dma, - DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_KERNEL); dma_fence_work_commit(&clflush->base); /* * We must have successfully populated the pages(since we are diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d74f9eea855e..6bf3fb1c8045 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -739,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, return ret; } - dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); ret = dma_resv_reserve_fences(bo->base.resv, 1); if (unlikely(ret)) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7a96a1db13a7..99deb45894f4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -508,7 +508,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, return ret; dma_resv_add_fence(&ghost_obj->base._resv, fence, - DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_KERNEL); /** * If we're not moving to fixed memory, the TTM object @@ -562,7 +562,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); int ret = 0; - dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); if (!evict) ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); else if (!from->use_tt && pipeline) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index bec50223efe5..408ede1f967f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -759,7 +759,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo, ret = dma_resv_reserve_fences(bo->base.resv, 1); if (!ret) dma_resv_add_fence(bo->base.resv, &fence->base, - DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_KERNEL); else /* Last resort fallback when we are OOM */ dma_fence_wait(&fence->base, false); diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 7bb7e7edbb6f..a749f229ae91 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -55,11 +55,29 @@ struct dma_resv_list; * This enum describes the different use cases for a dma_resv object and * controls which fences are returned when queried. * - * An important fact is that there is the order WRITE Date: Mon, 4 Apr 2022 12:57:30 +0200 Subject: [PATCH 009/219] drm/amdgpu: use DMA_RESV_USAGE_KERNEL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Wait only for kernel fences before kmap or UVD direct submission. This also makes sure that we always wait in amdgpu_bo_kmap() even when returning a cached pointer. Signed-off-by: Christian König Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-6-christian.koenig@amd.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a3cdf8a24377..5832c05ab10d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -761,6 +761,11 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM; + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL, + false, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + return r; + kptr = amdgpu_bo_kptr(bo); if (kptr) { if (ptr) @@ -768,11 +773,6 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, - false, MAX_SCHEDULE_TIMEOUT); - if (r < 0) - return r; - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 3654326219e0..6eac649499d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1164,7 +1164,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (direct) { r = dma_resv_wait_timeout(bo->tbo.base.resv, - DMA_RESV_USAGE_WRITE, false, + DMA_RESV_USAGE_KERNEL, false, msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; From 91f0c245dd5cd7039090283031b485384b315852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 4 Apr 2022 12:59:32 +0200 Subject: [PATCH 010/219] drm/radeon: use DMA_RESV_USAGE_KERNEL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always wait for kernel fences before kmap and not only for UVD kmaps. 
Signed-off-by: Christian König Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-7-christian.koenig@amd.com --- drivers/gpu/drm/radeon/radeon_object.c | 7 ++++++- drivers/gpu/drm/radeon/radeon_uvd.c | 12 ++---------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index cb5c4aa45cef..6c4a6802ca96 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -219,7 +219,12 @@ int radeon_bo_create(struct radeon_device *rdev, int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) { bool is_iomem; - int r; + long r; + + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL, + false, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + return r; if (bo->kptr) { if (ptr) { diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index a50750740ab0..a2cda184b2b2 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -470,24 +470,16 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, int32_t *msg, msg_type, handle; unsigned img_size = 0; void *ptr; - long r; - int i; + int i, r; if (offset & 0x3F) { DRM_ERROR("UVD messages must be 64 byte aligned!\n"); return -EINVAL; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, - false, MAX_SCHEDULE_TIMEOUT); - if (r <= 0) { - DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); - return r ? r : -ETIME; - } - r = radeon_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); + DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); return r; } From 61e55c6f5ce11b15b1c21ba9211c09d2354dbe66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 4 Apr 2022 13:20:02 +0200 Subject: [PATCH 011/219] RDMA: use DMA_RESV_USAGE_KERNEL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only need to wait for kernel submissions here. Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-9-christian.koenig@amd.com --- drivers/infiniband/core/umem_dmabuf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index f9901d273b8e..fce80a4a5147 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -68,7 +68,7 @@ wait_fence: * the migration. */ return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, - DMA_RESV_USAGE_WRITE, + DMA_RESV_USAGE_KERNEL, false, MAX_SCHEDULE_TIMEOUT); } EXPORT_SYMBOL(ib_umem_dmabuf_map_pages); From 0cc848a75b742c3f9800e643cd2c03b9cfdc3d69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 9 Nov 2021 11:08:18 +0100 Subject: [PATCH 012/219] dma-buf: add DMA_RESV_USAGE_BOOKKEEP v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an usage for submissions independent of implicit sync but still interesting for memory management. 
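Assuming the usage ordering KERNEL < WRITE < READ < BOOKKEEP that this series establishes, the split looks roughly like this (resv/fence names are placeholders): a fence that must not participate in implicit synchronization is added with BOOKKEEP usage, while memory management queries BOOKKEEP and therefore still sees it.

	/* e.g. a page table update: invisible to implicit sync, since
	 * WRITE/READ queries do not return BOOKKEEP fences */
	dma_resv_add_fence(resv, pt_fence, DMA_RESV_USAGE_BOOKKEEP);

	/* eviction/memory management asks for everything */
	long r = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
				       false, MAX_SCHEDULE_TIMEOUT);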
v2: cleanup the kerneldoc a bit v3: separate amdgpu changes from this Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-10-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 4 ++-- drivers/dma-buf/st-dma-resv.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 +++--- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +- drivers/gpu/drm/qxl/qxl_debugfs.c | 2 +- drivers/gpu/drm/radeon/radeon_gem.c | 2 +- drivers/gpu/drm/radeon/radeon_mn.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 14 +++++++------- include/linux/dma-resv.h | 13 ++++++++++++- 14 files changed, 35 insertions(+), 24 deletions(-) diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index f4860e5f2d8b..5b64aa554c36 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -520,7 +520,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) list = NULL; - dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ); + dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP); dma_resv_for_each_fence_unlocked(&cursor, f) { if (dma_resv_iter_is_restarted(&cursor)) { @@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled); */ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) { - static const char *usage[] = { "kernel", "write", "read" }; + static const char *usage[] = { "kernel", "write", "read", "bookkeep" }; struct dma_resv_iter cursor; struct dma_fence *fence; diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index 062b57d63fa6..8ace9e84c845 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -296,7 +296,7 @@ int dma_resv(void) int r; spin_lock_init(&fence_lock); - for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ; + for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP; ++usage) { r = subtests(tests, (void *)(unsigned long)usage); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 65998cbcd7f7..4ba4b54092f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, struct dma_fence *fence; int r; - r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence); + r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence); if (r) goto fallback; @@ -139,7 +139,7 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, + dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 86f5248676b0..b86c0b8252a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 744e144e5fc2..11c46b3e4c60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -260,7 +260,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return -EINVAL; /* TODO: Use DMA_RESV_USAGE_READ here */ - dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) { + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) { dma_fence_chain_for_each(f, f) { struct dma_fence *tmp = dma_fence_chain_contained(f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5db5066e74b4..49ffad312d5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1345,7 +1345,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * be resident to run successfully */ dma_resv_for_each_fence(&resv_cursor, bo->base.resv, - DMA_RESV_USAGE_READ, f) { + DMA_RESV_USAGE_BOOKKEEP, f) { if (amdkfd_fence_check_mm(f, current->mm)) return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a0376fd36a82..5277c10d901d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) { + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) { /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); amdgpu_vm_add_prt_cb(adev, fence); @@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP)) return false; /* Try to block ongoing updates */ @@ -2846,7 +2846,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, - DMA_RESV_USAGE_READ, + DMA_RESV_USAGE_BOOKKEEP, true, timeout); if (timeout <= 0) return timeout; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index a200d3e66573..4115a222a853 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef 
CONFIG_LOCKDEP - GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) && + GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) && i915_gem_object_evictable(obj)); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 644fe237601c..094f06b4ce33 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, return true; /* we will unbind on next submission, still have userptr pins */ - r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false, + r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 33e5889d6608..2d9ed3b94574 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -62,7 +62,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) int rel = 0; dma_resv_iter_begin(&cursor, bo->tbo.base.resv, - DMA_RESV_USAGE_READ); + DMA_RESV_USAGE_BOOKKEEP); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) rel = 0; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 6616a828f40b..8c01a7f0e027 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -163,7 +163,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ r = dma_resv_wait_timeout(robj->tbo.base.resv, - DMA_RESV_USAGE_READ, + DMA_RESV_USAGE_BOOKKEEP, true, 30 * HZ); if (!r) r = -EBUSY; diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 68ebeb1bdfff..29fe8423bd90 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, return true; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6bf3fb1c8045..360f980c7e10 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ); + dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (!fence->ops->signaled) dma_fence_enable_sw_signaling(fence); @@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, struct dma_resv *resv = &bo->base._resv; int ret; - if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ)) + if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) ret = 0; else ret = -EBUSY; @@ -264,7 +264,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, dma_resv_unlock(bo->base.resv); spin_unlock(&bo->bdev->lru_lock); - lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, + lret = dma_resv_wait_timeout(resv, 
DMA_RESV_USAGE_BOOKKEEP, interruptible, 30 * HZ); @@ -369,7 +369,7 @@ static void ttm_bo_release(struct kref *kref) * fences block for the BO to become idle */ dma_resv_wait_timeout(bo->base.resv, - DMA_RESV_USAGE_READ, false, + DMA_RESV_USAGE_BOOKKEEP, false, 30 * HZ); } @@ -380,7 +380,7 @@ static void ttm_bo_release(struct kref *kref) ttm_mem_io_free(bdev, bo->resource); } - if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) || + if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) || !dma_resv_trylock(bo->base.resv)) { /* The BO is not idle, resurrect it for delayed destroy */ ttm_bo_flush_all_fences(bo); @@ -1046,13 +1046,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, long timeout = 15 * HZ; if (no_wait) { - if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) + if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) return 0; else return -EBUSY; } - timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, + timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, interruptible, timeout); if (timeout < 0) return timeout; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index a749f229ae91..1db759eacc98 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -55,7 +55,7 @@ struct dma_resv_list; * This enum describes the different use cases for a dma_resv object and * controls which fences are returned when queried. * - * An important fact is that there is the order KERNEL Date: Tue, 23 Nov 2021 09:58:36 +0100 Subject: [PATCH 013/219] dma-buf: wait for map to complete for static attachments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have previously done that in the individual drivers but it is more defensive to move that into the common code. Dynamic attachments should wait for map operations to complete by themselves. 
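For a non-dynamic importer the wait now happens inside the dma-buf core before the mapping is handed back; a minimal sketch of the importer side (attachment setup and error handling omitted):

	struct sg_table *sgt;

	/* static attachment: no move_notify, so the core waits for
	 * DMA_RESV_USAGE_KERNEL fences in __map_dma_buf() first */
	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* the backing pages are settled at this point */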
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-12-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 16 ++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 14 +------------- drivers/gpu/drm/nouveau/nouveau_prime.c | 17 +---------------- drivers/gpu/drm/radeon/radeon_prime.c | 14 ++------------ 4 files changed, 18 insertions(+), 43 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 1cddb65eafda..79795857be3e 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -661,12 +661,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; + signed long ret; sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); + if (IS_ERR_OR_NULL(sg_table)) + return sg_table; - if (!IS_ERR_OR_NULL(sg_table)) - mangle_sg_table(sg_table); + if (!dma_buf_attachment_is_dynamic(attach)) { + ret = dma_resv_wait_timeout(attach->dmabuf->resv, + DMA_RESV_USAGE_KERNEL, true, + MAX_SCHEDULE_TIMEOUT); + if (ret < 0) { + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, + direction); + return ERR_PTR(ret); + } + } + mangle_sg_table(sg_table); return sg_table; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 579adfafe4d0..782cbca37538 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach) { struct drm_gem_object *obj = attach->dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int r; /* pin buffer into GTT */ - r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); - if (r) - return r; - - if (bo->tbo.moving) { - r = dma_fence_wait(bo->tbo.moving, true); - if (r) { - amdgpu_bo_unpin(bo); - return r; - } - } - return 0; + return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); } /** diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index 60019d0532fc..347488685f74 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj) if (ret) return -EINVAL; - ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); - if (ret) - goto error; - - if (nvbo->bo.moving) - ret = dma_fence_wait(nvbo->bo.moving, true); - - ttm_bo_unreserve(&nvbo->bo); - if (ret) - goto error; - - return ret; - -error: - nouveau_bo_unpin(nvbo); - return ret; + return 0; } void nouveau_gem_prime_unpin(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 4a90807351e7..42a87948e28c 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj) /* pin buffer into GTT */ ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); - if (unlikely(ret)) - goto error; + if (likely(ret == 0)) + bo->prime_shared_count++; - if (bo->tbo.moving) { - ret = dma_fence_wait(bo->tbo.moving, false); - if (unlikely(ret)) { - radeon_bo_unpin(bo); - goto error; - } - } - - bo->prime_shared_count++; -error: radeon_bo_unreserve(bo); return ret; } From 1d7f5e6c5240c324afa138738a7d50218a7584c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Dec 2021 11:23:56 +0100 Subject: [PATCH 014/219] drm/i915: 
drop bo->moving dependency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That should now be handled by the common dma_resv framework. Signed-off-by: Christian König Reviewed-by: Daniel Vetter Cc: intel-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-13-christian.koenig@amd.com --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 41 ++++--------------- drivers/gpu/drm/i915/gem/i915_gem_object.h | 8 +--- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 15 +------ .../drm/i915/gem/selftests/i915_gem_migrate.c | 3 +- .../drm/i915/gem/selftests/i915_gem_mman.c | 3 +- drivers/gpu/drm/i915/i915_vma.c | 9 +++- 6 files changed, 21 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 372bc220faeb..ffde7bc0a95d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -741,30 +741,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = { /** * i915_gem_object_get_moving_fence - Get the object's moving fence if any * @obj: The object whose moving fence to get. + * @fence: The resulting fence * * A non-signaled moving fence means that there is an async operation * pending on the object that needs to be waited on before setting up * any GPU- or CPU PTEs to the object's pages. * - * Return: A refcounted pointer to the object's moving fence if any, - * NULL otherwise. + * Return: Negative error code or 0 for success. */ -struct dma_fence * -i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj) +int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj, + struct dma_fence **fence) { - return dma_fence_get(i915_gem_to_ttm(obj)->moving); -} - -void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj, - struct dma_fence *fence) -{ - struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving; - - if (*moving == fence) - return; - - dma_fence_put(*moving); - *moving = dma_fence_get(fence); + return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL, + fence); } /** @@ -782,23 +771,9 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj, int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, bool intr) { - struct dma_fence *fence = i915_gem_to_ttm(obj)->moving; - int ret; - assert_object_held(obj); - if (!fence) - return 0; - - ret = dma_fence_wait(fence, intr); - if (ret) - return ret; - - if (fence->error) - return fence->error; - - i915_gem_to_ttm(obj)->moving = NULL; - dma_fence_put(fence); - return 0; + return dma_resv_wait_timeout(obj->base. 
resv, DMA_RESV_USAGE_KERNEL, + intr, MAX_SCHEDULE_TIMEOUT); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 02c37fe4a535..e11d82a9f7c3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } -struct dma_fence * -i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj); - -void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj, - struct dma_fence *fence); - +int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj, + struct dma_fence **fence); int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, bool intr); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 438b8a95b3d1..a10716f4e717 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -467,19 +467,6 @@ out: return fence; } -static int -prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct i915_deps *deps) -{ - int ret; - - ret = i915_deps_add_dependency(deps, bo->moving, ctx); - if (!ret) - ret = i915_deps_add_resv(deps, bo->base.resv, ctx); - - return ret; -} - /** * i915_ttm_move - The TTM move callback used by i915. * @bo: The buffer object. @@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, struct i915_deps deps; i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - ret = prev_deps(bo, ctx, &deps); + ret = i915_deps_add_resv(&deps, bo->base.resv, ctx); if (ret) { i915_refct_sgt_put(dst_rsgt); return ret; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index 4997ed18b6e4..0ad443a90c8b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -219,8 +219,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt, err = dma_resv_reserve_fences(obj->base.resv, 1); if (!err) dma_resv_add_fence(obj->base.resv, &rq->fence, - DMA_RESV_USAGE_WRITE); - i915_gem_object_set_moving_fence(obj, &rq->fence); + DMA_RESV_USAGE_KERNEL); i915_request_put(rq); } if (err) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 3a6e3f6d239f..dfc34cc2ef8c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1221,8 +1221,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements, i915_gem_object_unpin_pages(obj); if (rq) { dma_resv_add_fence(obj->base.resv, &rq->fence, - DMA_RESV_USAGE_WRITE); - i915_gem_object_set_moving_fence(obj, &rq->fence); + DMA_RESV_USAGE_KERNEL); i915_request_put(rq); } i915_gem_object_unlock(obj); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 524477d8939e..d077f7b9eaad 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1357,10 +1357,17 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (err) return err; + if (vma->obj) { + err = i915_gem_object_get_moving_fence(vma->obj, &moving); + if (err) + return err; + } else { + moving = NULL; + } + if (flags & PIN_GLOBAL) wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); - moving = 
vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL; if (flags & vma->vm->bind_async_flags || moving) { /* lock VM */ err = i915_vm_lock_objects(vma->vm, ww); From 8bb31587820a6e04cb613b49238b1800d1a97223 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 23 Nov 2021 11:30:35 +0100 Subject: [PATCH 015/219] drm/ttm: remove bo->moving MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is now handled by the DMA-buf framework in the dma_resv obj. Also remove the workaround inside VMWGFX to update the moving fence. Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-14-christian.koenig@amd.com --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 13 ++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 11 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 ++++-- drivers/gpu/drm/ttm/ttm_bo.c | 10 ++---- drivers/gpu/drm/ttm/ttm_bo_util.c | 7 ---- drivers/gpu/drm/ttm/ttm_bo_vm.c | 34 +++++++------------ drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6 ---- include/drm/ttm/ttm_bo_api.h | 2 -- 9 files changed, 39 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 5031e26e6716..3dc5ab2764ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2447,6 +2447,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; struct kfd_mem_attachment *attachment; + struct dma_resv_iter cursor; + struct dma_fence *fence; total_size += amdgpu_bo_size(bo); @@ -2461,10 +2463,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) goto validate_map_fail; } } - ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving); - if (ret) { - pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); - goto validate_map_fail; + dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, + DMA_RESV_USAGE_KERNEL, fence) { + ret = amdgpu_sync_fence(&sync_obj, fence); + if (ret) { + pr_debug("Memory eviction: Sync BO fence failed. 
Try again\n"); + goto validate_map_fail; + } } list_for_each_entry(attachment, &mem->attachments, list) { if (!attachment->is_mapped) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5832c05ab10d..ef93abec13b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (unlikely(r)) goto fail_unreserve; - amdgpu_bo_fence(bo, fence, false); - dma_fence_put(bo->tbo.moving); - bo->tbo.moving = dma_fence_get(fence); + dma_resv_add_fence(bo->tbo.base.resv, fence, + DMA_RESV_USAGE_KERNEL); dma_fence_put(fence); } if (!bp->resv) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index e3fbf0f10add..31913ae86de6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, { unsigned int i; uint64_t value; - int r; + long r; - if (vmbo->bo.tbo.moving) { - r = dma_fence_wait(vmbo->bo.tbo.moving, true); - if (r) - return r; - } + r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL, + true, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + return r; pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index dbb551762805..bdb44cee19d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, struct amdgpu_bo *bo = &vmbo->bo; enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE : AMDGPU_IB_POOL_DELAYED; + struct dma_resv_iter cursor; unsigned int i, ndw, nptes; + struct dma_fence *fence; uint64_t *pte; int r; /* Wait for PD/PT moves to be completed */ - r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving); - if (r) - return r; + dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, + DMA_RESV_USAGE_KERNEL, fence) { + r = amdgpu_sync_fence(&p->job->sync, fence); + if (r) + return r; + } do { ndw = p->num_dw_left; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 360f980c7e10..015a94f766de 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -418,7 +418,6 @@ static void ttm_bo_release(struct kref *kref) dma_resv_unlock(bo->base.resv); atomic_dec(&ttm_glob.bo_count); - dma_fence_put(bo->moving); bo->destroy(bo); } @@ -714,9 +713,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo) EXPORT_SYMBOL(ttm_bo_unpin); /* - * Add the last move fence to the BO and reserve a new shared slot. We only use - * a shared slot to avoid unecessary sync and rely on the subsequent bo move to - * either stall or use an exclusive fence respectively set bo->moving. + * Add the last move fence to the BO as kernel dependency and reserve a new + * fence slot. 
*/ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_resource_manager *man, @@ -746,9 +744,6 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, dma_fence_put(fence); return ret; } - - dma_fence_put(bo->moving); - bo->moving = fence; return 0; } @@ -951,7 +946,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, bo->bdev = bdev; bo->type = type; bo->page_alignment = page_alignment; - bo->moving = NULL; bo->pin_count = 0; bo->sg = sg; bo->bulk_move = NULL; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 99deb45894f4..bc5190340b9c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -228,7 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, atomic_inc(&ttm_glob.bo_count); INIT_LIST_HEAD(&fbo->base.ddestroy); - fbo->base.moving = NULL; drm_vma_node_reset(&fbo->base.base.vma_node); kref_init(&fbo->base.kref); @@ -500,9 +499,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, * operation has completed. */ - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); - ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) return ret; @@ -546,9 +542,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, spin_unlock(&from->move_lock); ttm_resource_free(bo, &bo->resource); - - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); } int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 08ba083a80d2..5b324f245265 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -46,17 +46,13 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct vm_fault *vmf) { - vm_fault_t ret = 0; - int err = 0; - - if (likely(!bo->moving)) - goto out_unlock; + long err = 0; /* * Quick non-stalling check for idle. */ - if (dma_fence_is_signaled(bo->moving)) - goto out_clear; + if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL)) + return 0; /* * If possible, avoid waiting for GPU with mmap_lock @@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, * is the first attempt. */ if (fault_flag_allow_retry_first(vmf->flags)) { - ret = VM_FAULT_RETRY; if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) - goto out_unlock; + return VM_FAULT_RETRY; ttm_bo_get(bo); mmap_read_unlock(vmf->vma->vm_mm); - (void) dma_fence_wait(bo->moving, true); + (void)dma_resv_wait_timeout(bo->base.resv, + DMA_RESV_USAGE_KERNEL, true, + MAX_SCHEDULE_TIMEOUT); dma_resv_unlock(bo->base.resv); ttm_bo_put(bo); - goto out_unlock; + return VM_FAULT_RETRY; } /* * Ordinary wait. */ - err = dma_fence_wait(bo->moving, true); - if (unlikely(err != 0)) { - ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS : + err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true, + MAX_SCHEDULE_TIMEOUT); + if (unlikely(err < 0)) { + return (err != -ERESTARTSYS) ? 
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; - goto out_unlock; } -out_clear: - dma_fence_put(bo->moving); - bo->moving = NULL; - -out_unlock: - return ret; + return 0; } static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index a84d1d5628d0..a7d62a4eb47b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1161,12 +1161,6 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start, PAGE_SIZE); vmw_bo_fence_single(bo, NULL); - if (bo->moving) - dma_fence_put(bo->moving); - - return dma_resv_get_singleton(bo->base.resv, - DMA_RESV_USAGE_WRITE, - &bo->moving); } return 0; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c76932b68a33..2d524f8b0802 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -94,7 +94,6 @@ struct ttm_tt; * @deleted: True if the object is only a zombie and already deleted. * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. - * @moving: Fence set when BO is moving * @offset: The current GPU offset, which can have different meanings * depending on the memory type. For SYSTEM type memory, it should be 0. * @cur_placement: Hint of current placement. @@ -147,7 +146,6 @@ struct ttm_buffer_object { * Members protected by a bo reservation. */ - struct dma_fence *moving; unsigned priority; unsigned pin_count; From 8f94eda39952a8c7323bad2bf752bdfe78101b20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 4 Apr 2022 14:58:37 +0200 Subject: [PATCH 016/219] dma-buf: drop seq count based update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should be possible now since we don't have the distinction between exclusive and shared fences any more. The only possible pitfall is that a dma_fence would be reused during the RCU grace period, but even that could be handled with a single extra check. 
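The replacement is easiest to see in the unlocked iteration path: instead of sampling a sequence counter, the iterator re-checks whether the RCU-protected fence array was swapped out underneath it. Condensed from the code changed below:

	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(&cursor);
		dma_resv_iter_walk_unlocked(&cursor);
		/* restart when the fence array was replaced concurrently */
	} while (dma_resv_fences_list(cursor.obj) != cursor.fences);
	rcu_read_unlock();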
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-15-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 33 ++++++++++++--------------------- drivers/dma-buf/st-dma-resv.c | 2 +- include/linux/dma-resv.h | 12 ------------ 3 files changed, 13 insertions(+), 34 deletions(-) diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 5b64aa554c36..0cce6e4ec946 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -133,7 +133,6 @@ static void dma_resv_list_free(struct dma_resv_list *list) void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); - seqcount_ww_mutex_init(&obj->seq, &obj->lock); RCU_INIT_POINTER(obj->fences, NULL); } @@ -292,28 +291,24 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, fobj = dma_resv_fences_list(obj); count = fobj->num_fences; - write_seqcount_begin(&obj->seq); - for (i = 0; i < count; ++i) { enum dma_resv_usage old_usage; dma_resv_list_entry(fobj, i, obj, &old, &old_usage); if ((old->context == fence->context && old_usage >= usage) || - dma_fence_is_signaled(old)) - goto replace; + dma_fence_is_signaled(old)) { + dma_resv_list_set(fobj, i, fence, usage); + dma_fence_put(old); + return; + } } BUG_ON(fobj->num_fences >= fobj->max_fences); - old = NULL; count++; -replace: dma_resv_list_set(fobj, i, fence, usage); /* pointer update must be visible before we extend the num_fences */ smp_store_mb(fobj->num_fences, count); - - write_seqcount_end(&obj->seq); - dma_fence_put(old); } EXPORT_SYMBOL(dma_resv_add_fence); @@ -341,7 +336,6 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, dma_resv_assert_held(obj); list = dma_resv_fences_list(obj); - write_seqcount_begin(&obj->seq); for (i = 0; list && i < list->num_fences; ++i) { struct dma_fence *old; @@ -352,14 +346,12 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, dma_resv_list_set(list, i, replacement, usage); dma_fence_put(old); } - write_seqcount_end(&obj->seq); } EXPORT_SYMBOL(dma_resv_replace_fences); /* Restart the unlocked iteration by initializing the cursor object. 
*/ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { - cursor->seq = read_seqcount_begin(&cursor->obj->seq); cursor->index = 0; cursor->num_fences = 0; cursor->fences = dma_resv_fences_list(cursor->obj); @@ -388,8 +380,10 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) cursor->obj, &cursor->fence, &cursor->fence_usage); cursor->fence = dma_fence_get_rcu(cursor->fence); - if (!cursor->fence) - break; + if (!cursor->fence) { + dma_resv_iter_restart_unlocked(cursor); + continue; + } if (!dma_fence_is_signaled(cursor->fence) && cursor->usage >= cursor->fence_usage) @@ -415,7 +409,7 @@ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) do { dma_resv_iter_restart_unlocked(cursor); dma_resv_iter_walk_unlocked(cursor); - } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq)); + } while (dma_resv_fences_list(cursor->obj) != cursor->fences); rcu_read_unlock(); return cursor->fence; @@ -438,13 +432,13 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) rcu_read_lock(); cursor->is_restarted = false; - restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq); + restart = dma_resv_fences_list(cursor->obj) != cursor->fences; do { if (restart) dma_resv_iter_restart_unlocked(cursor); dma_resv_iter_walk_unlocked(cursor); restart = true; - } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq)); + } while (dma_resv_fences_list(cursor->obj) != cursor->fences); rcu_read_unlock(); return cursor->fence; @@ -540,10 +534,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) } dma_resv_iter_end(&cursor); - write_seqcount_begin(&dst->seq); list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst)); - write_seqcount_end(&dst->seq); - dma_resv_list_free(list); return 0; } diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index 8ace9e84c845..813779e3c9be 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -217,7 +217,7 @@ static int test_for_each_unlocked(void *arg) if (r == -ENOENT) { r = -EINVAL; /* That should trigger an restart */ - cursor.seq--; + cursor.fences = (void*)~0; } else if (r == -EINVAL) { r = 0; } diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 1db759eacc98..c8ccbc94d5d2 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -155,15 +155,6 @@ struct dma_resv { */ struct ww_mutex lock; - /** - * @seq: - * - * Sequence count for managing RCU read-side synchronization, allows - * read-only access to @fences while ensuring we take a consistent - * snapshot. - */ - seqcount_ww_mutex_t seq; - /** * @fences: * @@ -202,9 +193,6 @@ struct dma_resv_iter { /** @fence_usage: the usage of the current fence */ enum dma_resv_usage fence_usage; - /** @seq: sequence number to check for modifications */ - unsigned int seq; - /** @index: index into the shared fences */ unsigned int index; From e84815cbbc767617221e6891e77f2486c9199dfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 7 Apr 2022 10:20:55 +0200 Subject: [PATCH 017/219] seqlock: drop seqcount_ww_mutex_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Daniel pointed out that this series removes the last user of seqcount_ww_mutex_t, so let's drop this. 
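The remaining seqcount_LOCKNAME_t flavours are unaffected; for instance a mutex-protected seqcount keeps working as before (struct and field names here are made up for illustration):

	struct foo {
		struct mutex lock;
		seqcount_mutex_t seq;
	};

	static void foo_init(struct foo *f)
	{
		mutex_init(&f->lock);
		seqcount_mutex_init(&f->seq, &f->lock);
	}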
Signed-off-by: Christian König Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Will Deacon Cc: Waiman Long Cc: Boqun Feng Cc: linux-kernel@vger.kernel.org Acked-by: Peter Zijlstra (Intel) Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-16-christian.koenig@amd.com --- include/linux/seqlock.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 37ded6b8fee6..3926e9027947 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -164,7 +163,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * static initializer or init function. This enables lockdep to validate * that the write side critical section is properly serialized. * - * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex. + * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex */ /* @@ -184,7 +183,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock) #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) -#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex) /* * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers @@ -277,7 +275,6 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_s SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock)) SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock)) SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock)) -SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL)) /* * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t @@ -304,8 +301,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu __seqprop_case((s), raw_spinlock, prop), \ __seqprop_case((s), spinlock, prop), \ __seqprop_case((s), rwlock, prop), \ - __seqprop_case((s), mutex, prop), \ - __seqprop_case((s), ww_mutex, prop)) + __seqprop_case((s), mutex, prop)) #define seqprop_ptr(s) __seqprop(s, ptr) #define seqprop_sequence(s) __seqprop(s, sequence) From 807ff7ed34d2c83e09d7987bd6506cc3d520dcdf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 7 Apr 2022 13:43:15 +0200 Subject: [PATCH 018/219] futex: add missing rtmutex.h include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This isn't included here any more since the removal of ww_mutex.h from seqlock.h which causes a build break. Signed-off-by: Christian König Acked-by: Shashank Sharma Fixes: e84815cbbc76 ("seqlock: drop seqcount_ww_mutex_t") Link: https://patchwork.freedesktop.org/patch/msgid/20220407114619.961750-1-christian.koenig@amd.com --- kernel/futex/futex.h | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h index c264cbeab71c..b5379c0e6d6d 100644 --- a/kernel/futex/futex.h +++ b/kernel/futex/futex.h @@ -3,6 +3,7 @@ #define _FUTEX_H #include +#include #include #ifdef CONFIG_PREEMPT_RT From 2f073eb41230cf10d0c1fd0719fdb3f19e8497dd Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 7 Apr 2022 01:29:53 +0800 Subject: [PATCH 019/219] dt-bindings: vendor-prefixes: Add prefix for SINO WEALTH Eletronics Ltd. 
Add a vendor prefix entry for SINO WEALTH Eletronics Ltd. (http://www.sinowealth.com). Signed-off-by: Chen-Yu Tsai Acked-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220406172956.3953-2-wens@kernel.org --- Documentation/devicetree/bindings/vendor-prefixes.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 01430973ecec..79b72e370ade 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -1130,6 +1130,8 @@ patternProperties: description: Sinlinx Electronics Technology Co., LTD "^sinovoip,.*": description: SinoVoip Co., Ltd + "^sinowealth,.*": + description: SINO WEALTH Electronic Ltd. "^sipeed,.*": description: Shenzhen Sipeed Technology Co., Ltd. "^sirf,.*": From 97a40c23cda5d64a1c5e968e448435084ebc8c9b Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 7 Apr 2022 01:29:54 +0800 Subject: [PATCH 020/219] dt-bindings: display: ssd1307fb: Add entry for SINO WEALTH SH1106 The SINO WEALTH SH1106 is an OLED display driver that is somewhat compatible with the SSD1306. It supports a slightly wider display, at 132 instead of 128 pixels. The basic commands are the same, but the SH1106 doesn't support the horizontal or vertical address modes. Add a compatible string for it. Signed-off-by: Chen-Yu Tsai Acked-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220406172956.3953-3-wens@kernel.org --- .../bindings/display/solomon,ssd1307fb.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml index 9baafd0c42dd..ade61d502edd 100644 --- a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml +++ b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml @@ -13,6 +13,7 @@ maintainers: properties: compatible: enum: + - sinowealth,sh1106-i2c - solomon,ssd1305fb-i2c - solomon,ssd1306fb-i2c - solomon,ssd1307fb-i2c @@ -131,6 +132,18 @@ required: - reg allOf: + - if: + properties: + compatible: + contains: + const: sinowealth,sh1106-i2c + then: + properties: + solomon,dclk-div: + default: 1 + solomon,dclk-frq: + default: 5 + - if: properties: compatible: From b0daaa5cfaa561477b8d3d10fb0697a2cce0c2ba Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 7 Apr 2022 01:29:55 +0800 Subject: [PATCH 021/219] drm/ssd130x: Support page addressing mode On the SINO WEALTH SH1106, which is mostly compatible with the SSD1306, only the basic page addressing mode is supported. This addressing mode is not as easy to use compared to the currently supported horizontal addressing mode, as the page address has to be set prior to writing out each page, and each page must be written out separately as a result. Also, there is no way to force the column address to wrap around early, thus the column address must also be reset for each page to be accurate. Add support for this addressing mode, with a flag to choose it. This flag is designed to be set from the device info data structure, but can be extended to be explicitly forced on through a device tree property if such a need arises. 
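Condensed, the page addressing path added below boils down to re-programming the page and column start before every page transfer (error handling trimmed, helpers as introduced by this patch):

	for (i = 0; i < pages; i++) {
		/* ... fill data_array[] with one page worth of pixels ... */

		/* page mode: the start position must be set for each page */
		ret = ssd130x_set_page_pos(ssd130x, ssd130x->page_offset + i,
					   ssd130x->col_offset + x);
		if (ret < 0)
			break;

		/* and each page has to be written out separately */
		ret = ssd130x_write_data(ssd130x, data_array, width);
		if (ret < 0)
			break;
	}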
Signed-off-by: Chen-Yu Tsai Acked-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220406172956.3953-4-wens@kernel.org --- drivers/gpu/drm/solomon/ssd130x.c | 73 ++++++++++++++++++++++++++++--- drivers/gpu/drm/solomon/ssd130x.h | 2 + 2 files changed, 68 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 38b6c2c14f53..a7e784518c69 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -42,6 +42,8 @@ #define SSD130X_DATA 0x40 #define SSD130X_COMMAND 0x80 +#define SSD130X_PAGE_COL_START_LOW 0x00 +#define SSD130X_PAGE_COL_START_HIGH 0x10 #define SSD130X_SET_ADDRESS_MODE 0x20 #define SSD130X_SET_COL_RANGE 0x21 #define SSD130X_SET_PAGE_RANGE 0x22 @@ -61,6 +63,11 @@ #define SSD130X_SET_COM_PINS_CONFIG 0xda #define SSD130X_SET_VCOMH 0xdb +#define SSD130X_PAGE_COL_START_MASK GENMASK(3, 0) +#define SSD130X_PAGE_COL_START_HIGH_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val) >> 4) +#define SSD130X_PAGE_COL_START_LOW_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val)) +#define SSD130X_START_PAGE_ADDRESS_MASK GENMASK(2, 0) +#define SSD130X_START_PAGE_ADDRESS_SET(val) FIELD_PREP(SSD130X_START_PAGE_ADDRESS_MASK, (val)) #define SSD130X_SET_SEG_REMAP_MASK GENMASK(0, 0) #define SSD130X_SET_SEG_REMAP_SET(val) FIELD_PREP(SSD130X_SET_SEG_REMAP_MASK, (val)) #define SSD130X_SET_COM_SCAN_DIR_MASK GENMASK(3, 3) @@ -130,6 +137,7 @@ out_end: return ret; } +/* Set address range for horizontal/vertical addressing modes */ static int ssd130x_set_col_range(struct ssd130x_device *ssd130x, u8 col_start, u8 cols) { @@ -166,6 +174,26 @@ static int ssd130x_set_page_range(struct ssd130x_device *ssd130x, return 0; } +/* Set page and column start address for page addressing mode */ +static int ssd130x_set_page_pos(struct ssd130x_device *ssd130x, + u8 page_start, u8 col_start) +{ + int ret; + u32 page, col_low, col_high; + + page = SSD130X_START_PAGE_ADDRESS | + SSD130X_START_PAGE_ADDRESS_SET(page_start); + col_low = SSD130X_PAGE_COL_START_LOW | + SSD130X_PAGE_COL_START_LOW_SET(col_start); + col_high = SSD130X_PAGE_COL_START_HIGH | + SSD130X_PAGE_COL_START_HIGH_SET(col_start); + ret = ssd130x_write_cmd(ssd130x, 3, page, col_low, col_high); + if (ret < 0) + return ret; + + return 0; +} + static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x) { struct device *dev = ssd130x->dev; @@ -342,6 +370,11 @@ static int ssd130x_init(struct ssd130x_device *ssd130x) } } + /* Switch to page addressing mode */ + if (ssd130x->page_address_mode) + return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE, + SSD130X_SET_ADDRESS_MODE_PAGE); + /* Switch to horizontal addressing mode */ return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE, SSD130X_SET_ADDRESS_MODE_HORIZONTAL); @@ -396,13 +429,16 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf, * (5) A4 B4 C4 D4 E4 F4 G4 H4 */ - ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width); - if (ret < 0) - goto out_free; + if (!ssd130x->page_address_mode) { + /* Set address range for horizontal addressing mode */ + ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width); + if (ret < 0) + goto out_free; - ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages); - if (ret < 0) - goto out_free; + ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages); + if (ret < 0) + goto out_free; + } for (i = 0; i < pages; i++) { 
int m = 8; @@ -421,9 +457,29 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf, } data_array[array_idx++] = data; } + + /* + * In page addressing mode, the start address needs to be reset, + * and each page then needs to be written out separately. + */ + if (ssd130x->page_address_mode) { + ret = ssd130x_set_page_pos(ssd130x, + ssd130x->page_offset + i, + ssd130x->col_offset + x); + if (ret < 0) + goto out_free; + + ret = ssd130x_write_data(ssd130x, data_array, width); + if (ret < 0) + goto out_free; + + array_idx = 0; + } } - ret = ssd130x_write_data(ssd130x, data_array, width * pages); + /* Write out update in one go if we aren't using page addressing mode */ + if (!ssd130x->page_address_mode) + ret = ssd130x_write_data(ssd130x, data_array, width * pages); out_free: kfree(data_array); @@ -806,6 +862,9 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap) ssd130x->regmap = regmap; ssd130x->device_info = device_get_match_data(dev); + if (ssd130x->device_info->page_mode_only) + ssd130x->page_address_mode = 1; + ssd130x_parse_properties(ssd130x); ret = ssd130x_get_resources(ssd130x); diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h index cd21cdccb566..f5b062576fdf 100644 --- a/drivers/gpu/drm/solomon/ssd130x.h +++ b/drivers/gpu/drm/solomon/ssd130x.h @@ -24,6 +24,7 @@ struct ssd130x_deviceinfo { u32 default_dclk_frq; int need_pwm; int need_chargepump; + bool page_mode_only; }; struct ssd130x_device { @@ -38,6 +39,7 @@ struct ssd130x_device { const struct ssd130x_deviceinfo *device_info; + unsigned page_address_mode : 1; unsigned area_color_enable : 1; unsigned com_invdir : 1; unsigned com_lrremap : 1; From 7cee157b4225089cb831f77515f9c4f4f7c24182 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 7 Apr 2022 01:29:56 +0800 Subject: [PATCH 022/219] drm/ssd130x: Add support for SINO WEALTH SH1106 The SINO WEALTH SH1106 is an OLED display driver that is somewhat compatible with the SSD1306. It supports a slightly wider display, at 132 instead of 128 pixels. The basic commands are the same, but the SH1106 doesn't support the horizontal or vertical address modes. Add support for this display driver. The default values for some of the hardware settings are taken from the datasheet. Signed-off-by: Chen-Yu Tsai Acked-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220406172956.3953-5-wens@kernel.org --- drivers/gpu/drm/solomon/Kconfig | 9 +++++---- drivers/gpu/drm/solomon/ssd130x-i2c.c | 11 +++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig index 6230369505c9..8c0a0c788385 100644 --- a/drivers/gpu/drm/solomon/Kconfig +++ b/drivers/gpu/drm/solomon/Kconfig @@ -5,9 +5,9 @@ config DRM_SSD130X select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help - DRM driver for the SSD1305, SSD1306, SSD1307 and SSD1309 Solomon - OLED controllers. This is only for the core driver, a driver for - the appropriate bus transport in your chip also must be selected. + DRM driver for the SSD130x Solomon and SINO WEALTH SH110x OLED + controllers. This is only for the core driver, a driver for the + appropriate bus transport in your chip also must be selected. If M is selected the module will be called ssd130x. @@ -16,6 +16,7 @@ config DRM_SSD130X_I2C depends on DRM_SSD130X && I2C select REGMAP_I2C help - Say Y here if the SSD130x OLED display is connected via I2C bus. 
+ Say Y here if the SSD130x or SH110x OLED display is connected via + I2C bus. If M is selected the module will be called ssd130x-i2c. diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c index 3126aeda4ced..d099b241dd3f 100644 --- a/drivers/gpu/drm/solomon/ssd130x-i2c.c +++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c @@ -53,6 +53,13 @@ static void ssd130x_i2c_shutdown(struct i2c_client *client) ssd130x_shutdown(ssd130x); } +static struct ssd130x_deviceinfo ssd130x_sh1106_deviceinfo = { + .default_vcomh = 0x40, + .default_dclk_div = 1, + .default_dclk_frq = 5, + .page_mode_only = 1, +}; + static struct ssd130x_deviceinfo ssd130x_ssd1305_deviceinfo = { .default_vcomh = 0x34, .default_dclk_div = 1, @@ -80,6 +87,10 @@ static struct ssd130x_deviceinfo ssd130x_ssd1309_deviceinfo = { }; static const struct of_device_id ssd130x_of_match[] = { + { + .compatible = "sinowealth,sh1106-i2c", + .data = &ssd130x_sh1106_deviceinfo, + }, { .compatible = "solomon,ssd1305fb-i2c", .data = &ssd130x_ssd1305_deviceinfo, From be273ecfbe8a32fd8f875cc2990223cfc8a32031 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 7 Apr 2022 15:19:50 +0200 Subject: [PATCH 023/219] drm/vc4: Use newer fence API properly to fix build errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The commit 73511edf8b19 ("dma-buf: specify usage while adding fences to dma_resv obj v7") ported all the DRM drivers to use the newer fence API that specifies the usage with the enum dma_resv_usage rather than doing an explicit shared / exclusive distinction. But the commit didn't do it properly in two callers of the vc4 driver, leading to build errors. Fixes: 73511edf8b19 ("dma-buf: specify usage while adding fences to dma_resv obj v7") Signed-off-by: Javier Martinez Canillas Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20220407131950.915091-1-javierm@redhat.com --- drivers/gpu/drm/vc4/vc4_gem.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 38550317e025..9eaf304fc20d 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -546,7 +546,8 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) bo = to_vc4_bo(&exec->bo[i]->base); bo->seqno = seqno; - dma_resv_add_fence(bo->base.base.resv, exec->fence); + dma_resv_add_fence(bo->base.base.resv, exec->fence, + DMA_RESV_USAGE_READ); } list_for_each_entry(bo, &exec->unref_list, unref_head) { @@ -557,7 +558,8 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); bo->write_seqno = seqno; - dma_resv_add_excl_fence(bo->base.base.resv, exec->fence); + dma_resv_add_fence(bo->base.base.resv, exec->fence, + DMA_RESV_USAGE_WRITE); } } From 9ad7acdad1d91545b99bf9fda3de4b86cf48b272 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:19 +0200 Subject: [PATCH 024/219] fbcon: delete a few unneeded forward decl I didn't bother with any code movement to fix the others, these just got a bit in the way. v2: Rebase on top of Helge's reverts. 
Acked-by: Thomas Zimmermann Acked-by: Sam Ravnborg (v1) Reviewed-by: Geert Uytterhoeven (v1) Signed-off-by: Daniel Vetter Cc: Helge Deller Cc: Daniel Vetter Cc: Thomas Zimmermann Cc: Du Cheng Cc: Tetsuo Handa Cc: Claudio Suarez Cc: Greg Kroah-Hartman Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-2-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 2fc1b80a26ad..235eaab37d84 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -163,29 +163,14 @@ static int fbcon_cursor_noblink; * Interface used by the world */ -static const char *fbcon_startup(void); -static void fbcon_init(struct vc_data *vc, int init); -static void fbcon_deinit(struct vc_data *vc); -static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, - int width); -static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos); -static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, - int count, int ypos, int xpos); static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); -static void fbcon_cursor(struct vc_data *vc, int mode); static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, int height, int width); -static int fbcon_switch(struct vc_data *vc); -static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch); static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); /* * Internal routines */ -static __inline__ void ywrap_up(struct vc_data *vc, int count); -static __inline__ void ywrap_down(struct vc_data *vc, int count); -static __inline__ void ypan_up(struct vc_data *vc, int count); -static __inline__ void ypan_down(struct vc_data *vc, int count); static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, int dy, int dx, int height, int width, u_int y_break); static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, @@ -194,8 +179,8 @@ static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); -static void fbcon_start(void); static void fbcon_exit(void); + static struct device *fbcon_device; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION From 689333136327b6cd618df85d83d79f2aa620d585 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:20 +0200 Subject: [PATCH 025/219] fbcon: Move fbcon_bmove(_rec) functions Avoids two forward declarations, and more importantly, matches what I've done in my fbcon scrolling restore patches - so I need this to avoid a bunch of conflicts in rebasing since we ended up merging Helge's series instead. 
Acked-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Signed-off-by: Daniel Vetter Cc: Helge Deller Cc: Daniel Vetter Cc: Thomas Zimmermann Cc: Du Cheng Cc: Tetsuo Handa Cc: Claudio Suarez Cc: Greg Kroah-Hartman Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-3-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 134 +++++++++++++++---------------- 1 file changed, 65 insertions(+), 69 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 235eaab37d84..e925bb608e25 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -164,15 +164,11 @@ static int fbcon_cursor_noblink; */ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); -static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, - int height, int width); static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); /* * Internal routines */ -static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, - int dy, int dx, int height, int width, u_int y_break); static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit); static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, @@ -1667,6 +1663,71 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, } } +static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, + int dy, int dx, int height, int width, u_int y_break) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + u_int b; + + if (sy < y_break && sy + height > y_break) { + b = y_break - sy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + + if (dy < y_break && dy + height > y_break) { + b = y_break - dy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, + height, width); +} + +static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, + int height, int width) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + if (fbcon_is_inactive(vc, info)) + return; + + if (!width || !height) + return; + + /* Split blits that cross physical y_wrap case. 
+ * Pathological case involves 4 blits, better to use recursive + * code rather than unrolled case + * + * Recursive invocations don't need to erase the cursor over and + * over again, so we use fbcon_bmove_rec() + */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width, + p->vrows - p->yscroll); +} + static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, enum con_scroll dir, unsigned int count) { @@ -1867,71 +1928,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, } -static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, - int height, int width) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_display *p = &fb_display[vc->vc_num]; - - if (fbcon_is_inactive(vc, info)) - return; - - if (!width || !height) - return; - - /* Split blits that cross physical y_wrap case. - * Pathological case involves 4 blits, better to use recursive - * code rather than unrolled case - * - * Recursive invocations don't need to erase the cursor over and - * over again, so we use fbcon_bmove_rec() - */ - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width, - p->vrows - p->yscroll); -} - -static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, - int dy, int dx, int height, int width, u_int y_break) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - u_int b; - - if (sy < y_break && sy + height > y_break) { - b = y_break - sy; - if (dy < sy) { /* Avoid trashing self */ - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - } else { - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - } - return; - } - - if (dy < y_break && dy + height > y_break) { - b = y_break - dy; - if (dy < sy) { /* Avoid trashing self */ - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - } else { - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - } - return; - } - ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, - height, width); -} - static void updatescrollmode_accel(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) From 409d6c95f9c68bb7046410a3502e26e454a1e636 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:21 +0200 Subject: [PATCH 026/219] fbcon: Introduce wrapper for console->fb_info lookup Half of it is protected by console_lock, but the other half is a lot more awkward: Registration/deregistration of fbdev are serialized, but we don't really clear out anything in con2fb_map and so there's potential for use-after free mixups. First step is to encapsulate the lookup. 
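As a quick illustration of the calling convention for the new helper (a hypothetical call site, not part of the patch): console_lock() must already be held, since con2fb_map is only protected by it, while registered_fb sits behind a separate mutex, so the returned pointer can still race with unregistration.

	/* hypothetical caller, assuming a valid console index */
	static void demo_dump_mapping(int console)
	{
		struct fb_info *info;

		console_lock();
		info = fbcon_info_from_console(console);
		if (info)
			pr_info("fbcon: console %d maps to fb%d\n",
				console, info->node);
		console_unlock();
	}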
Acked-by: Thomas Zimmermann Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Helge Deller Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Tetsuo Handa Cc: Du Cheng Cc: Claudio Suarez Cc: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-4-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 76 ++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 32 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index e925bb608e25..b75e638cb83d 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -110,6 +110,18 @@ static struct fbcon_display fb_display[MAX_NR_CONSOLES]; static signed char con2fb_map[MAX_NR_CONSOLES]; static signed char con2fb_map_boot[MAX_NR_CONSOLES]; +static struct fb_info *fbcon_info_from_console(int console) +{ + WARN_CONSOLE_UNLOCKED(); + + /* + * Note that only con2fb_map is protected by the console lock, + * registered_fb is protected by a separate mutex. This lookup can + * therefore race. + */ + return registered_fb[con2fb_map[console]]; +} + static int logo_lines; /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO enums. */ @@ -199,7 +211,7 @@ static void fbcon_rotate(struct fb_info *info, u32 rotate) if (!ops || ops->currcon == -1) return; - fb_info = registered_fb[con2fb_map[ops->currcon]]; + fb_info = fbcon_info_from_console(ops->currcon); if (info == fb_info) { struct fbcon_display *p = &fb_display[ops->currcon]; @@ -226,7 +238,7 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate) for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (!vc || vc->vc_mode != KD_TEXT || - registered_fb[con2fb_map[i]] != info) + fbcon_info_from_console(i) != info) continue; p = &fb_display[vc->vc_num]; @@ -356,7 +368,7 @@ static void fb_flashcursor(struct work_struct *work) vc = vc_cons[ops->currcon].d; if (!vc || !con_is_visible(vc) || - registered_fb[con2fb_map[vc->vc_num]] != info || + fbcon_info_from_console(vc->vc_num) != info || vc->vc_deccm != 1) { console_unlock(); return; @@ -791,7 +803,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, if (show_logo) { struct vc_data *fg_vc = vc_cons[fg_console].d; struct fb_info *fg_info = - registered_fb[con2fb_map[fg_console]]; + fbcon_info_from_console(fg_console); fbcon_prepare_logo(fg_vc, fg_info, fg_vc->vc_cols, fg_vc->vc_rows, fg_vc->vc_cols, @@ -1014,7 +1026,7 @@ static void fbcon_init(struct vc_data *vc, int init) if (con2fb_map[vc->vc_num] == -1) con2fb_map[vc->vc_num] = info_idx; - info = registered_fb[con2fb_map[vc->vc_num]]; + info = fbcon_info_from_console(vc->vc_num); if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET) logo_shown = FBCON_LOGO_DONTSHOW; @@ -1231,7 +1243,7 @@ finished: static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, int width) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; @@ -1269,7 +1281,7 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; @@ 
-1289,7 +1301,7 @@ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos) static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_is_inactive(vc, info)) @@ -1298,7 +1310,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) static void fbcon_cursor(struct vc_data *vc, int mode) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; int c = scr_readw((u16 *) vc->vc_pos); @@ -1392,7 +1404,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, static __inline__ void ywrap_up(struct vc_data *vc, int count) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; @@ -1411,7 +1423,7 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count) static __inline__ void ywrap_down(struct vc_data *vc, int count) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; @@ -1430,7 +1442,7 @@ static __inline__ void ywrap_down(struct vc_data *vc, int count) static __inline__ void ypan_up(struct vc_data *vc, int count) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; @@ -1454,7 +1466,7 @@ static __inline__ void ypan_up(struct vc_data *vc, int count) static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; @@ -1478,7 +1490,7 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) static __inline__ void ypan_down(struct vc_data *vc, int count) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; @@ -1502,7 +1514,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count) static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; @@ -1666,7 +1678,7 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, int dy, int dx, int height, int width, u_int y_break) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; u_int b; @@ -1708,7 +1720,7 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, static void fbcon_bmove(struct vc_data *vc, int sy, 
int sx, int dy, int dx, int height, int width) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; if (fbcon_is_inactive(vc, info)) @@ -1731,7 +1743,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, enum con_scroll dir, unsigned int count) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; @@ -1996,7 +2008,7 @@ static void updatescrollmode(struct fbcon_display *p, static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, unsigned int user) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; struct fb_var_screeninfo var = info->var; @@ -2065,7 +2077,7 @@ static int fbcon_switch(struct vc_data *vc) struct fb_var_screeninfo var; int i, ret, prev_console; - info = registered_fb[con2fb_map[vc->vc_num]]; + info = fbcon_info_from_console(vc->vc_num); ops = info->fbcon_par; if (logo_shown >= 0) { @@ -2079,7 +2091,7 @@ static int fbcon_switch(struct vc_data *vc) prev_console = ops->currcon; if (prev_console != -1) - old_info = registered_fb[con2fb_map[prev_console]]; + old_info = fbcon_info_from_console(prev_console); /* * FIXME: If we have multiple fbdev's loaded, we need to * update all info->currcon. Perhaps, we can place this @@ -2202,7 +2214,7 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info, static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; if (mode_switch) { @@ -2244,7 +2256,7 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) static int fbcon_debug_enter(struct vc_data *vc) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; ops->save_graphics = ops->graphics; @@ -2257,7 +2269,7 @@ static int fbcon_debug_enter(struct vc_data *vc) static int fbcon_debug_leave(struct vc_data *vc) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; ops->graphics = ops->save_graphics; @@ -2393,7 +2405,7 @@ static void set_vc_hi_font(struct vc_data *vc, bool set) static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount, const u8 * data, int userfont) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; int resize; @@ -2447,7 +2459,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount, static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigned int flags) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); unsigned charcount = 
font->charcount; int w = font->width; int h = font->height; @@ -2511,7 +2523,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); const struct font_desc *f; if (!name) @@ -2535,7 +2547,7 @@ static struct fb_cmap palette_cmap = { static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fb_info *info = fbcon_info_from_console(vc->vc_num); int i, j, k, depth; u8 val; @@ -2651,7 +2663,7 @@ static void fbcon_modechanged(struct fb_info *info) return; vc = vc_cons[ops->currcon].d; if (vc->vc_mode != KD_TEXT || - registered_fb[con2fb_map[ops->currcon]] != info) + fbcon_info_from_console(ops->currcon) != info) return; p = &fb_display[vc->vc_num]; @@ -2691,7 +2703,7 @@ static void fbcon_set_all_vcs(struct fb_info *info) for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (!vc || vc->vc_mode != KD_TEXT || - registered_fb[con2fb_map[i]] != info) + fbcon_info_from_console(i) != info) continue; if (con_is_visible(vc)) { @@ -2954,7 +2966,7 @@ void fbcon_fb_blanked(struct fb_info *info, int blank) vc = vc_cons[ops->currcon].d; if (vc->vc_mode != KD_TEXT || - registered_fb[con2fb_map[ops->currcon]] != info) + fbcon_info_from_console(ops->currcon) != info) return; if (con_is_visible(vc)) { @@ -2974,7 +2986,7 @@ void fbcon_new_modelist(struct fb_info *info) const struct fb_videomode *mode; for (i = first_fb_vc; i <= last_fb_vc; i++) { - if (registered_fb[con2fb_map[i]] != info) + if (fbcon_info_from_console(i) != info) continue; if (!fb_display[i].mode) continue; From 9b0a490e71ebf76b0b59ab9382f5289f8a3dcf9e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:22 +0200 Subject: [PATCH 027/219] fbcon: delete delayed loading code Before commit 6104c37094e729f3d4ce65797002112735d49cd1 Author: Daniel Vetter Date: Tue Aug 1 17:32:07 2017 +0200 fbcon: Make fbcon a built-time depency for fbdev it was possible to load fbcon and fbdev drivers in any order, which means that fbcon init had to handle the case where fbdev drivers where already registered. This is no longer possible, hence delete that code. Note that the exit case is a bit more complex and will be done in a separate patch. Since I had to audit the entire fbcon load code I also spotted a wrong function name in a comment in fbcon_startup(), which this patch also fixes. 
v2: Explain why we also fix the comment (Sam) Acked-by: Thomas Zimmermann Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Helge Deller Cc: Daniel Vetter Cc: Claudio Suarez Cc: Greg Kroah-Hartman Cc: Tetsuo Handa Cc: Du Cheng Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-5-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index b75e638cb83d..83f0223f5333 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -944,7 +944,7 @@ static const char *fbcon_startup(void) return display_desc; /* * Instead of blindly using registered_fb[0], we use info_idx, set by - * fb_console_init(); + * fbcon_fb_registered(); */ info = registered_fb[info_idx]; if (!info) @@ -3299,17 +3299,6 @@ static void fbcon_start(void) return; } #endif - - if (num_registered_fb) { - int i; - - for_each_registered_fb(i) { - info_idx = i; - break; - } - - do_fbcon_takeover(0); - } } static void fbcon_exit(void) From 9ad5cc9bcfd62b0f39e7f85992d2a371ff369324 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:23 +0200 Subject: [PATCH 028/219] fbdev/sysfs: Fix locking fb_set_var requires we hold the fb_info lock. Or at least this now matches what the ioctl does ... Note that ps3fb and sh_mobile_lcdcfb are busted in different ways here, but I will not fix them up. Also in practice this isn't a big deal, because really variable fbdev state is actually protected by console_lock (because fbcon just doesn't bother with lock_fb_info() at all), and lock_fb_info protecting anything is really just a neat lie. But that's a much bigger fish to fry. Acked-by: Thomas Zimmermann Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Helge Deller Cc: Daniel Vetter Cc: Qing Wang Cc: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-6-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbsysfs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c index 26892940c213..8c1ee9ecec3d 100644 --- a/drivers/video/fbdev/core/fbsysfs.c +++ b/drivers/video/fbdev/core/fbsysfs.c @@ -91,9 +91,11 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var) var->activate |= FB_ACTIVATE_FORCE; console_lock(); + lock_fb_info(fb_info); err = fb_set_var(fb_info, var); if (!err) fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL); + unlock_fb_info(fb_info); console_unlock(); if (err) return err; From 3b0fb6ab25dda03f6077bf8fce9407bb0d4db6ea Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:24 +0200 Subject: [PATCH 029/219] fbcon: Use delayed work for cursor Allows us to delete a bunch of hand-rolled stuff using a timer plus a separate work). Also to simplify the code we initialize the cursor_work completely when we allocate the fbcon_ops structure, instead of trying to cope with console re-initialization. The motiviation here is that fbcon code stops using the fb_info.queue, which helps with locking issues around cleanup and all that in a later patch. Also note that this allows us to ditch the hand-rolled work cleanup in fbcon_exit - we already call fbcon_del_cursor_timer, which takes care of everything. Plus this was racy anyway. 
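For readers less familiar with the workqueue API, a minimal sketch of the self-rearming delayed_work pattern the patch switches to; the demo_* names are illustrative and not part of fbcon:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct demo_blink {
		struct delayed_work work;
		unsigned long period;		/* e.g. HZ / 5 */
	};

	static void demo_blink_fn(struct work_struct *work)
	{
		struct demo_blink *b =
			container_of(work, struct demo_blink, work.work);

		/* ... toggle the cursor here ... */

		/* re-arm: replaces the old timer + separate work_struct pair */
		queue_delayed_work(system_power_efficient_wq, &b->work,
				   b->period);
	}

	static void demo_blink_start(struct demo_blink *b)
	{
		INIT_DELAYED_WORK(&b->work, demo_blink_fn);
		b->period = HZ / 5;
		queue_delayed_work(system_power_efficient_wq, &b->work,
				   b->period);
	}

	static void demo_blink_stop(struct demo_blink *b)
	{
		/* waits for a running callback, no hand-rolled sync needed */
		cancel_delayed_work_sync(&b->work);
	}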
v2: - Only INIT_DELAYED_WORK when kzalloc succeeded (Tetsuo) - Explain that we replace both the timer and a work with the combined delayed_work (Javier) Reviewed-by: Javier Martinez Canillas Acked-by: Thomas Zimmermann Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Claudio Suarez Cc: Du Cheng Cc: Thomas Zimmermann Cc: Greg Kroah-Hartman Cc: Tetsuo Handa Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-7-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 87 +++++++++++++------------------- drivers/video/fbdev/core/fbcon.h | 4 +- 2 files changed, 36 insertions(+), 55 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 83f0223f5333..759299667023 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -350,8 +350,8 @@ static int get_color(struct vc_data *vc, struct fb_info *info, static void fb_flashcursor(struct work_struct *work) { - struct fb_info *info = container_of(work, struct fb_info, queue); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work); + struct fb_info *info; struct vc_data *vc = NULL; int c; int mode; @@ -364,7 +364,10 @@ static void fb_flashcursor(struct work_struct *work) if (ret == 0) return; - if (ops && ops->currcon != -1) + /* protected by console_lock */ + info = ops->info; + + if (ops->currcon != -1) vc = vc_cons[ops->currcon].d; if (!vc || !con_is_visible(vc) || @@ -380,42 +383,25 @@ static void fb_flashcursor(struct work_struct *work) ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); + + queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, + ops->cur_blink_jiffies); } -static void cursor_timer_handler(struct timer_list *t) -{ - struct fbcon_ops *ops = from_timer(ops, t, cursor_timer); - struct fb_info *info = ops->info; - - queue_work(system_power_efficient_wq, &info->queue); - mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies); -} - -static void fbcon_add_cursor_timer(struct fb_info *info) +static void fbcon_add_cursor_work(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - if ((!info->queue.func || info->queue.func == fb_flashcursor) && - !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) && - !fbcon_cursor_noblink) { - if (!info->queue.func) - INIT_WORK(&info->queue, fb_flashcursor); - - timer_setup(&ops->cursor_timer, cursor_timer_handler, 0); - mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies); - ops->flags |= FBCON_FLAGS_CURSOR_TIMER; - } + if (!fbcon_cursor_noblink) + queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, + ops->cur_blink_jiffies); } -static void fbcon_del_cursor_timer(struct fb_info *info) +static void fbcon_del_cursor_work(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - if (info->queue.func == fb_flashcursor && - ops->flags & FBCON_FLAGS_CURSOR_TIMER) { - del_timer_sync(&ops->cursor_timer); - ops->flags &= ~FBCON_FLAGS_CURSOR_TIMER; - } + cancel_delayed_work_sync(&ops->cursor_work); } #ifndef MODULE @@ -717,6 +703,8 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, } if (!err) { + INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); + ops->cur_blink_jiffies = HZ / 5; ops->info = info; info->fbcon_par = ops; @@ -751,7 +739,7 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, } if (!err) { - fbcon_del_cursor_timer(oldinfo); + fbcon_del_cursor_work(oldinfo); 
kfree(ops->cursor_state.mask); kfree(ops->cursor_data); kfree(ops->cursor_src); @@ -867,7 +855,7 @@ static int set_con2fb_map(int unit, int newidx, int user) logo_shown != FBCON_LOGO_DONTSHOW); if (!found) - fbcon_add_cursor_timer(info); + fbcon_add_cursor_work(info); con2fb_map_boot[unit] = newidx; con2fb_init_display(vc, info, unit, show_logo); } @@ -964,6 +952,8 @@ static const char *fbcon_startup(void) return NULL; } + INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); + ops->currcon = -1; ops->graphics = 1; ops->cur_rotate = -1; @@ -1006,7 +996,7 @@ static const char *fbcon_startup(void) info->var.yres, info->var.bits_per_pixel); - fbcon_add_cursor_timer(info); + fbcon_add_cursor_work(info); return display_desc; } @@ -1194,7 +1184,7 @@ static void fbcon_deinit(struct vc_data *vc) goto finished; if (con_is_visible(vc)) - fbcon_del_cursor_timer(info); + fbcon_del_cursor_work(info); ops->flags &= ~FBCON_FLAGS_INIT; finished: @@ -1320,9 +1310,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode) return; if (vc->vc_cursor_type & CUR_SW) - fbcon_del_cursor_timer(info); + fbcon_del_cursor_work(info); else - fbcon_add_cursor_timer(info); + fbcon_add_cursor_work(info); ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; @@ -2132,14 +2122,14 @@ static int fbcon_switch(struct vc_data *vc) } if (old_info != info) - fbcon_del_cursor_timer(old_info); + fbcon_del_cursor_work(old_info); } if (fbcon_is_inactive(vc, info) || ops->blank_state != FB_BLANK_UNBLANK) - fbcon_del_cursor_timer(info); + fbcon_del_cursor_work(info); else - fbcon_add_cursor_timer(info); + fbcon_add_cursor_work(info); set_blitting_type(vc, info); ops->cursor_reset = 1; @@ -2247,9 +2237,9 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) if (mode_switch || fbcon_is_inactive(vc, info) || ops->blank_state != FB_BLANK_UNBLANK) - fbcon_del_cursor_timer(info); + fbcon_del_cursor_work(info); else - fbcon_add_cursor_timer(info); + fbcon_add_cursor_work(info); return 0; } @@ -3181,7 +3171,7 @@ static ssize_t show_cursor_blink(struct device *device, if (!ops) goto err; - blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0; + blink = delayed_work_pending(&ops->cursor_work); err: console_unlock(); return snprintf(buf, PAGE_SIZE, "%d\n", blink); @@ -3210,10 +3200,10 @@ static ssize_t store_cursor_blink(struct device *device, if (blink) { fbcon_cursor_noblink = 0; - fbcon_add_cursor_timer(info); + fbcon_add_cursor_work(info); } else { fbcon_cursor_noblink = 1; - fbcon_del_cursor_timer(info); + fbcon_del_cursor_work(info); } err: @@ -3314,15 +3304,9 @@ static void fbcon_exit(void) #endif for_each_registered_fb(i) { - int pending = 0; - mapped = 0; info = registered_fb[i]; - if (info->queue.func) - pending = cancel_work_sync(&info->queue); - pr_debug("fbcon: %s pending work\n", (pending ? 
"canceled" : "no")); - for (j = first_fb_vc; j <= last_fb_vc; j++) { if (con2fb_map[j] == i) { mapped = 1; @@ -3338,15 +3322,12 @@ static void fbcon_exit(void) if (info->fbcon_par) { struct fbcon_ops *ops = info->fbcon_par; - fbcon_del_cursor_timer(info); + fbcon_del_cursor_work(info); kfree(ops->cursor_src); kfree(ops->cursor_state.mask); kfree(info->fbcon_par); info->fbcon_par = NULL; } - - if (info->queue.func == fb_flashcursor) - info->queue.func = NULL; } } } diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 969d41ecede5..6708ca0048aa 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -14,11 +14,11 @@ #include #include #include +#include #include #define FBCON_FLAGS_INIT 1 -#define FBCON_FLAGS_CURSOR_TIMER 2 /* * This is the interface between the low-level console driver and the @@ -68,7 +68,7 @@ struct fbcon_ops { int (*update_start)(struct fb_info *info); int (*rotate_font)(struct fb_info *info, struct vc_data *vc); struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ - struct timer_list cursor_timer; /* Cursor timer */ + struct delayed_work cursor_work; /* Cursor timer */ struct fb_cursor cursor_state; struct fbcon_display *p; struct fb_info *info; From cae69e453d735c546fc407d049ba048e80adf05e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:25 +0200 Subject: [PATCH 030/219] fbcon: Replace FBCON_FLAGS_INIT with a boolean It's only one flag and slightly tidier code. Acked-by: Thomas Zimmermann Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Tetsuo Handa Cc: Greg Kroah-Hartman Cc: Du Cheng Cc: Thomas Zimmermann Cc: Claudio Suarez Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-8-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 11 +++++------ drivers/video/fbdev/core/fbcon.h | 4 +--- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 759299667023..81860ca80838 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -775,7 +775,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, ops->currcon = fg_console; - if (info->fbops->fb_set_par && !(ops->flags & FBCON_FLAGS_INIT)) { + if (info->fbops->fb_set_par && !ops->initialized) { ret = info->fbops->fb_set_par(info); if (ret) @@ -784,7 +784,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, "error code %d\n", ret); } - ops->flags |= FBCON_FLAGS_INIT; + ops->initialized = true; ops->graphics = 0; fbcon_set_disp(info, &info->var, unit); @@ -1103,8 +1103,7 @@ static void fbcon_init(struct vc_data *vc, int init) * We need to do it in fbcon_init() to prevent screen corruption. 
*/ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { - if (info->fbops->fb_set_par && - !(ops->flags & FBCON_FLAGS_INIT)) { + if (info->fbops->fb_set_par && !ops->initialized) { ret = info->fbops->fb_set_par(info); if (ret) @@ -1113,7 +1112,7 @@ static void fbcon_init(struct vc_data *vc, int init) "error code %d\n", ret); } - ops->flags |= FBCON_FLAGS_INIT; + ops->initialized = true; } ops->graphics = 0; @@ -1186,7 +1185,7 @@ static void fbcon_deinit(struct vc_data *vc) if (con_is_visible(vc)) fbcon_del_cursor_work(info); - ops->flags &= ~FBCON_FLAGS_INIT; + ops->initialized = false; finished: fbcon_free_font(p, free_font); diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 6708ca0048aa..0eaf54a21151 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -18,8 +18,6 @@ #include -#define FBCON_FLAGS_INIT 1 - /* * This is the interface between the low-level console driver and the * low-level frame buffer device @@ -79,7 +77,7 @@ struct fbcon_ops { int blank_state; int graphics; int save_graphics; /* for debug enter/leave */ - int flags; + bool initialized; int rotate; int cur_rotate; char *cursor_data; From 6b2060cf9138a2cd5f3468a949d3869abed049ef Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:26 +0200 Subject: [PATCH 031/219] fb: Delete fb_info->queue It was only used by fbcon, and that now switched to its own, private work. Acked-by: Sam Ravnborg Acked-by: Thomas Zimmermann Signed-off-by: Daniel Vetter Cc: Helge Deller Cc: linux-fbdev@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-9-daniel.vetter@ffwll.ch --- include/linux/fb.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/fb.h b/include/linux/fb.h index 9a77ab615c36..f95da1af9ff6 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -450,7 +450,6 @@ struct fb_info { struct fb_var_screeninfo var; /* Current var */ struct fb_fix_screeninfo fix; /* Current fix */ struct fb_monspecs monspecs; /* Current Monitor specs */ - struct work_struct queue; /* Framebuffer event queue */ struct fb_pixmap pixmap; /* Image hardware mapper */ struct fb_pixmap sprite; /* Cursor hardware mapper */ struct fb_cmap cmap; /* Current cmap */ From bd6026a8c4e6b7edf4bafcb71da885b284b8f4fd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:27 +0200 Subject: [PATCH 032/219] fbcon: Extract fbcon_open/release helpers There's two minor behaviour changes in here: - in error paths we now consistently call fb_ops->fb_release - fb_release really can't fail (fbmem.c ignores it too) and there's no reasonable cleanup we can do anyway. Note that everything in fbcon.c is protected by the big console_lock() lock (especially all the global variables), so the minor changes in ordering of setup/cleanup do not matter. 
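A hedged sketch of how a caller is expected to use the new helper pair: every failure after a successful fbcon_open() unwinds through fbcon_release(), which is the consistency this patch establishes. demo_bind() and demo_setup_state() are hypothetical names, not fbcon functions:

	static int demo_bind(struct fb_info *info)
	{
		int err;

		err = fbcon_open(info);		/* module ref + fb_open() */
		if (err)
			return err;

		err = demo_setup_state(info);	/* hypothetical follow-up step */
		if (err) {
			fbcon_release(info);	/* fb_release() + module ref drop */
			return err;
		}

		return 0;
	}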
v2: Explain a bit better why this is all correct (Sam) Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Claudio Suarez Cc: Greg Kroah-Hartman Cc: Tetsuo Handa Cc: Du Cheng Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-10-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 111 +++++++++++++++---------------- 1 file changed, 55 insertions(+), 56 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 81860ca80838..d50154b62007 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -682,19 +682,37 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) #endif /* CONFIG_MISC_TILEBLITTING */ +static int fbcon_open(struct fb_info *info) +{ + if (!try_module_get(info->fbops->owner)) + return -ENODEV; + + if (info->fbops->fb_open && + info->fbops->fb_open(info, 0)) { + module_put(info->fbops->owner); + return -ENODEV; + } + + return 0; +} + +static void fbcon_release(struct fb_info *info) +{ + if (info->fbops->fb_release) + info->fbops->fb_release(info, 0); + + module_put(info->fbops->owner); +} static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, int unit, int oldidx) { struct fbcon_ops *ops = NULL; - int err = 0; + int err; - if (!try_module_get(info->fbops->owner)) - err = -ENODEV; - - if (!err && info->fbops->fb_open && - info->fbops->fb_open(info, 0)) - err = -ENODEV; + err = fbcon_open(info); + if (err) + return err; if (!err) { ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); @@ -715,7 +733,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, if (err) { con2fb_map[unit] = oldidx; - module_put(info->fbops->owner); + fbcon_release(info); } return err; @@ -726,45 +744,34 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, int oldidx, int found) { struct fbcon_ops *ops = oldinfo->fbcon_par; - int err = 0, ret; + int ret; - if (oldinfo->fbops->fb_release && - oldinfo->fbops->fb_release(oldinfo, 0)) { - con2fb_map[unit] = oldidx; - if (!found && newinfo->fbops->fb_release) - newinfo->fbops->fb_release(newinfo, 0); - if (!found) - module_put(newinfo->fbops->owner); - err = -ENODEV; + fbcon_release(oldinfo); + + fbcon_del_cursor_work(oldinfo); + kfree(ops->cursor_state.mask); + kfree(ops->cursor_data); + kfree(ops->cursor_src); + kfree(ops->fontbuffer); + kfree(oldinfo->fbcon_par); + oldinfo->fbcon_par = NULL; + /* + If oldinfo and newinfo are driving the same hardware, + the fb_release() method of oldinfo may attempt to + restore the hardware state. This will leave the + newinfo in an undefined state. Thus, a call to + fb_set_par() may be needed for the newinfo. + */ + if (newinfo && newinfo->fbops->fb_set_par) { + ret = newinfo->fbops->fb_set_par(newinfo); + + if (ret) + printk(KERN_ERR "con2fb_release_oldinfo: " + "detected unhandled fb_set_par error, " + "error code %d\n", ret); } - if (!err) { - fbcon_del_cursor_work(oldinfo); - kfree(ops->cursor_state.mask); - kfree(ops->cursor_data); - kfree(ops->cursor_src); - kfree(ops->fontbuffer); - kfree(oldinfo->fbcon_par); - oldinfo->fbcon_par = NULL; - module_put(oldinfo->fbops->owner); - /* - If oldinfo and newinfo are driving the same hardware, - the fb_release() method of oldinfo may attempt to - restore the hardware state. This will leave the - newinfo in an undefined state. Thus, a call to - fb_set_par() may be needed for the newinfo. 
- */ - if (newinfo && newinfo->fbops->fb_set_par) { - ret = newinfo->fbops->fb_set_par(newinfo); - - if (ret) - printk(KERN_ERR "con2fb_release_oldinfo: " - "detected unhandled fb_set_par error, " - "error code %d\n", ret); - } - } - - return err; + return 0; } static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, @@ -919,7 +926,6 @@ static const char *fbcon_startup(void) struct fbcon_display *p = &fb_display[fg_console]; struct vc_data *vc = vc_cons[fg_console].d; const struct font_desc *font = NULL; - struct module *owner; struct fb_info *info = NULL; struct fbcon_ops *ops; int rows, cols; @@ -938,17 +944,12 @@ static const char *fbcon_startup(void) if (!info) return NULL; - owner = info->fbops->owner; - if (!try_module_get(owner)) + if (fbcon_open(info)) return NULL; - if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) { - module_put(owner); - return NULL; - } ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); if (!ops) { - module_put(owner); + fbcon_release(info); return NULL; } @@ -3314,10 +3315,6 @@ static void fbcon_exit(void) } if (mapped) { - if (info->fbops->fb_release) - info->fbops->fb_release(info, 0); - module_put(info->fbops->owner); - if (info->fbcon_par) { struct fbcon_ops *ops = info->fbcon_par; @@ -3327,6 +3324,8 @@ static void fbcon_exit(void) kfree(info->fbcon_par); info->fbcon_par = NULL; } + + fbcon_release(info); } } } From b07db39584856e16814e2f065380e533a001535d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:28 +0200 Subject: [PATCH 033/219] fbcon: Ditch error handling for con2fb_release_oldinfo It doesn't ever fail anymore. Acked-by: Sam Ravnborg Acked-by: Thomas Zimmermann Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Thomas Zimmermann Cc: Greg Kroah-Hartman Cc: Claudio Suarez Cc: Du Cheng Cc: Tetsuo Handa Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-11-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 37 +++++++++++--------------------- 1 file changed, 13 insertions(+), 24 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index d50154b62007..d828fef16910 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -739,9 +739,8 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, return err; } -static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, - struct fb_info *newinfo, int unit, - int oldidx, int found) +static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, + struct fb_info *newinfo) { struct fbcon_ops *ops = oldinfo->fbcon_par; int ret; @@ -770,8 +769,6 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, "detected unhandled fb_set_par error, " "error code %d\n", ret); } - - return 0; } static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, @@ -825,7 +822,7 @@ static int set_con2fb_map(int unit, int newidx, int user) int oldidx = con2fb_map[unit]; struct fb_info *info = registered_fb[newidx]; struct fb_info *oldinfo = NULL; - int found, err = 0; + int found, err = 0, show_logo; WARN_CONSOLE_UNLOCKED(); @@ -854,18 +851,15 @@ static int set_con2fb_map(int unit, int newidx, int user) * fbcon should release it. 
*/ if (!err && oldinfo && !search_fb_in_map(oldidx)) - err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx, - found); + con2fb_release_oldinfo(vc, oldinfo, info); - if (!err) { - int show_logo = (fg_console == 0 && !user && - logo_shown != FBCON_LOGO_DONTSHOW); + show_logo = (fg_console == 0 && !user && + logo_shown != FBCON_LOGO_DONTSHOW); - if (!found) - fbcon_add_cursor_work(info); - con2fb_map_boot[unit] = newidx; - con2fb_init_display(vc, info, unit, show_logo); - } + if (!found) + fbcon_add_cursor_work(info); + con2fb_map_boot[unit] = newidx; + con2fb_init_display(vc, info, unit, show_logo); if (!search_fb_in_map(info_idx)) info_idx = newidx; @@ -2769,7 +2763,7 @@ static inline void fbcon_unbind(void) {} /* called with console_lock held */ void fbcon_fb_unbind(struct fb_info *info) { - int i, new_idx = -1, ret = 0; + int i, new_idx = -1; int idx = info->node; WARN_CONSOLE_UNLOCKED(); @@ -2803,13 +2797,8 @@ void fbcon_fb_unbind(struct fb_info *info) if (con2fb_map[i] == idx) { con2fb_map[i] = -1; if (!search_fb_in_map(idx)) { - ret = con2fb_release_oldinfo(vc_cons[i].d, - info, NULL, i, - idx, 0); - if (ret) { - con2fb_map[i] = idx; - return; - } + con2fb_release_oldinfo(vc_cons[i].d, + info, NULL); } } } From d443d93864726ad68c0a741d1e7b03934a9af143 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:29 +0200 Subject: [PATCH 034/219] fbcon: move more common code into fb_open() No idea why con2fb_acquire_newinfo() initializes much less than fbcon_startup(), but so be it. From a quick look most of the un-initialized stuff should be fairly harmless, but who knows. Note that the error handling for the con2fb_acquire_newinfo() failure case was very strange: Callers updated con2fb_map to the new value before calling this function, but upon error con2fb_acquire_newinfo reset it to the old value. Since I removed the call to fbcon_release anyway that strange error path was sticking out like a sore thumb, hence I removed it. Which also allows us to remove the oldidx parameter from that function. v2: Explain what's going on with oldidx and error paths (Sam) v3: Drop unused variable (0day) v4: Rebased over bisect fix in previous patch, unchagend end result. 
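Reduced to a sketch, the error handling now has a simpler acquire-then-commit shape: nothing global is written until acquisition succeeds, so there is nothing to roll back. demo_set_map() below is illustrative, not the literal set_con2fb_map() code:

	static int demo_set_map(int unit, int newidx)
	{
		struct fb_info *info = registered_fb[newidx];
		int err;

		err = con2fb_acquire_newinfo(vc_cons[unit].d, info, unit);
		if (err)
			return err;		/* con2fb_map untouched, nothing to undo */

		con2fb_map[unit] = newidx;	/* commit only on success */
		return 0;
	}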
Acked-by: Sam Ravnborg (v2) Acked-by: Thomas Zimmermann Cc: kernel test robot Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Tetsuo Handa Cc: Thomas Zimmermann Cc: Claudio Suarez Cc: Du Cheng Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-12-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 77 +++++++++++++------------------- 1 file changed, 31 insertions(+), 46 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index d828fef16910..f0213a0e3870 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -682,8 +682,18 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) #endif /* CONFIG_MISC_TILEBLITTING */ +static void fbcon_release(struct fb_info *info) +{ + if (info->fbops->fb_release) + info->fbops->fb_release(info, 0); + + module_put(info->fbops->owner); +} + static int fbcon_open(struct fb_info *info) { + struct fbcon_ops *ops; + if (!try_module_get(info->fbops->owner)) return -ENODEV; @@ -693,48 +703,31 @@ static int fbcon_open(struct fb_info *info) return -ENODEV; } + ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); + if (!ops) { + fbcon_release(info); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); + ops->info = info; + info->fbcon_par = ops; + ops->cur_blink_jiffies = HZ / 5; + return 0; } -static void fbcon_release(struct fb_info *info) -{ - if (info->fbops->fb_release) - info->fbops->fb_release(info, 0); - - module_put(info->fbops->owner); -} - static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, - int unit, int oldidx) + int unit) { - struct fbcon_ops *ops = NULL; int err; err = fbcon_open(info); if (err) return err; - if (!err) { - ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); - if (!ops) - err = -ENOMEM; - } - - if (!err) { - INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); - - ops->cur_blink_jiffies = HZ / 5; - ops->info = info; - info->fbcon_par = ops; - - if (vc) - set_blitting_type(vc, info); - } - - if (err) { - con2fb_map[unit] = oldidx; - fbcon_release(info); - } + if (vc) + set_blitting_type(vc, info); return err; } @@ -842,9 +835,11 @@ static int set_con2fb_map(int unit, int newidx, int user) found = search_fb_in_map(newidx); - con2fb_map[unit] = newidx; - if (!err && !found) - err = con2fb_acquire_newinfo(vc, info, unit, oldidx); + if (!err && !found) { + err = con2fb_acquire_newinfo(vc, info, unit); + if (!err) + con2fb_map[unit] = newidx; + } /* * If old fb is not mapped to any of the consoles, @@ -941,20 +936,10 @@ static const char *fbcon_startup(void) if (fbcon_open(info)) return NULL; - ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); - if (!ops) { - fbcon_release(info); - return NULL; - } - - INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); - + ops = info->fbcon_par; ops->currcon = -1; ops->graphics = 1; ops->cur_rotate = -1; - ops->cur_blink_jiffies = HZ / 5; - ops->info = info; - info->fbcon_par = ops; p->con_rotate = initial_rotation; if (p->con_rotate == -1) @@ -1024,7 +1009,7 @@ static void fbcon_init(struct vc_data *vc, int init) return; if (!info->fbcon_par) - con2fb_acquire_newinfo(vc, info, vc->vc_num, -1); + con2fb_acquire_newinfo(vc, info, vc->vc_num); /* If we are not the first console on this fb, copy the font from that console */ From 04933a294dacca3aaa480889d53e6195778d4578 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:30 +0200 Subject: [PATCH 035/219] fbcon: use lock_fb_info 
in fbcon_open/release Now we get to the real motiviation, because fbmem.c insists that that's the right lock for these. Ofc fbcon.c has a lot more places where it probably should call lock_fb_info(). But looking at fbmem.c at least most of these seem to be protected by console_lock() too, which is probably what papers over any issues. Note that this means we're shuffling around a bit the locking sections for some of the console takeover and unbind paths, but not all: - console binding/unbinding from the console layer never with lock_fb_info - unbind (as opposed to unlink) never bother with lock_fb_info Also the real serialization against set_par and set_pan are still doing by wrapping the entire ioctl code in console_lock(). So this shuffling shouldn't be worse than what we had from a "can you trigger races?" pov, but it's at least clearer. Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Claudio Suarez Cc: Tetsuo Handa Cc: Thomas Zimmermann Cc: Greg Kroah-Hartman Cc: Du Cheng Cc: Sam Ravnborg Cc: Matthew Wilcox Cc: William Kucharski Cc: Alex Deucher Cc: Zheyu Ma Cc: Zhen Lei Cc: Xiyu Yang Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-13-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 5 +++++ drivers/video/fbdev/core/fbmem.c | 4 ---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index f0213a0e3870..cc960bf49991 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -684,8 +684,10 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) static void fbcon_release(struct fb_info *info) { + lock_fb_info(info); if (info->fbops->fb_release) info->fbops->fb_release(info, 0); + unlock_fb_info(info); module_put(info->fbops->owner); } @@ -697,11 +699,14 @@ static int fbcon_open(struct fb_info *info) if (!try_module_get(info->fbops->owner)) return -ENODEV; + lock_fb_info(info); if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) { + unlock_fb_info(info); module_put(info->fbops->owner); return -ENODEV; } + unlock_fb_info(info); ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); if (!ops) { diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 34d6bb1bf82e..0e68d9456bc2 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1670,9 +1670,7 @@ static int do_register_framebuffer(struct fb_info *fb_info) console_lock(); else atomic_inc(&ignore_console_lock_warning); - lock_fb_info(fb_info); ret = fbcon_fb_registered(fb_info); - unlock_fb_info(fb_info); if (!lockless_register_fb) console_unlock(); @@ -1689,9 +1687,7 @@ static void unbind_console(struct fb_info *fb_info) return; console_lock(); - lock_fb_info(fb_info); fbcon_fb_unbind(fb_info); - unlock_fb_info(fb_info); console_unlock(); } From 43553559121ca90965b572cf8a1d6d0fd618b449 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:31 +0200 Subject: [PATCH 036/219] fbcon: Consistently protect deferred_takeover with console_lock() This shouldn't be a problem in practice since until we've actually taken over the console there's nothing we've registered with the console/vt subsystem, so the exit/unbind path that check this can't do the wrong thing. But it's confusing, so fix it by moving it a tad later. 
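A simplified sketch of the resulting shape, with the config guards and the notifier unregistration omitted: the notifier, which may run in atomic context, only schedules the work, and the flags are flipped later, under console_lock(), inside the work. The demo_ names are illustrative:

	static void demo_deferred_takeover(struct work_struct *work)
	{
		console_lock();
		deferred_takeover = false;	/* now consistently under console_lock */
		logo_shown = FBCON_LOGO_DONTSHOW;
		/* ... take over the already-registered framebuffers ... */
		console_unlock();
	}

	static int demo_output_notifier(struct notifier_block *nb,
					unsigned long action, void *data)
	{
		/* may be called in atomic context: just defer to the work */
		schedule_work(&fbcon_deferred_takeover_work);
		return NOTIFY_OK;
	}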
Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Du Cheng Cc: Tetsuo Handa Cc: Claudio Suarez Cc: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-14-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index cc960bf49991..4f9752ee9189 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -3227,6 +3227,9 @@ static void fbcon_register_existing_fbs(struct work_struct *work) console_lock(); + deferred_takeover = false; + logo_shown = FBCON_LOGO_DONTSHOW; + for_each_registered_fb(i) fbcon_fb_registered(registered_fb[i]); @@ -3244,8 +3247,6 @@ static int fbcon_output_notifier(struct notifier_block *nb, pr_info("fbcon: Taking over console\n"); dummycon_unregister_output_notifier(&fbcon_output_nb); - deferred_takeover = false; - logo_shown = FBCON_LOGO_DONTSHOW; /* We may get called in atomic context */ schedule_work(&fbcon_deferred_takeover_work); From 6e7da3af008b72520f5318507f455f344b27f022 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:32 +0200 Subject: [PATCH 037/219] fbcon: Move console_lock for register/unlink/unregister Ideally console_lock becomes an implementation detail of fbcon.c and doesn't show up anywhere in fbmem.c. We're still pretty far from that, but at least the register/unregister code is there now. With this the do_fb_ioctl() handler is the only code in fbmem.c still calling console_lock(). Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Thomas Zimmermann Cc: Du Cheng Cc: Claudio Suarez Cc: Greg Kroah-Hartman Cc: Tetsuo Handa Cc: Matthew Wilcox Cc: Sam Ravnborg Cc: Zheyu Ma Cc: Guenter Roeck Cc: Alex Deucher Cc: Zhen Lei Cc: Xiyu Yang Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-15-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 33 ++++++++++++++++++++++++++------ drivers/video/fbdev/core/fbmem.c | 23 ++-------------------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 4f9752ee9189..abb419a091c6 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2756,10 +2756,12 @@ void fbcon_fb_unbind(struct fb_info *info) int i, new_idx = -1; int idx = info->node; - WARN_CONSOLE_UNLOCKED(); + console_lock(); - if (!fbcon_has_console_bind) + if (!fbcon_has_console_bind) { + console_unlock(); return; + } for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] != idx && @@ -2794,6 +2796,8 @@ void fbcon_fb_unbind(struct fb_info *info) } fbcon_unbind(); } + + console_unlock(); } /* called with console_lock held */ @@ -2801,10 +2805,12 @@ void fbcon_fb_unregistered(struct fb_info *info) { int i, idx; - WARN_CONSOLE_UNLOCKED(); + console_lock(); - if (deferred_takeover) + if (deferred_takeover) { + console_unlock(); return; + } idx = info->node; for (i = first_fb_vc; i <= last_fb_vc; i++) { @@ -2833,6 +2839,7 @@ void fbcon_fb_unregistered(struct fb_info *info) if (!num_registered_fb) do_unregister_con_driver(&fb_con); + console_unlock(); } void fbcon_remap_all(struct fb_info *info) @@ -2890,19 +2897,27 @@ static inline void fbcon_select_primary(struct fb_info *info) } #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */ +static bool lockless_register_fb; +module_param_named_unsafe(lockless_register_fb, lockless_register_fb, 
bool, 0400); +MODULE_PARM_DESC(lockless_register_fb, + "Lockless framebuffer registration for debugging [default=off]"); + /* called with console_lock held */ int fbcon_fb_registered(struct fb_info *info) { int ret = 0, i, idx; - WARN_CONSOLE_UNLOCKED(); + if (!lockless_register_fb) + console_lock(); + else + atomic_inc(&ignore_console_lock_warning); idx = info->node; fbcon_select_primary(info); if (deferred_takeover) { pr_info("fbcon: Deferring console take-over\n"); - return 0; + goto out; } if (info_idx == -1) { @@ -2922,6 +2937,12 @@ int fbcon_fb_registered(struct fb_info *info) } } +out: + if (!lockless_register_fb) + console_unlock(); + else + atomic_dec(&ignore_console_lock_warning); + return ret; } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 0e68d9456bc2..bdd00d381bbc 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1590,14 +1590,9 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, } } -static bool lockless_register_fb; -module_param_named_unsafe(lockless_register_fb, lockless_register_fb, bool, 0400); -MODULE_PARM_DESC(lockless_register_fb, - "Lockless framebuffer registration for debugging [default=off]"); - static int do_register_framebuffer(struct fb_info *fb_info) { - int i, ret; + int i; struct fb_videomode mode; if (fb_check_foreignness(fb_info)) @@ -1666,17 +1661,7 @@ static int do_register_framebuffer(struct fb_info *fb_info) } #endif - if (!lockless_register_fb) - console_lock(); - else - atomic_inc(&ignore_console_lock_warning); - ret = fbcon_fb_registered(fb_info); - - if (!lockless_register_fb) - console_unlock(); - else - atomic_dec(&ignore_console_lock_warning); - return ret; + return fbcon_fb_registered(fb_info); } static void unbind_console(struct fb_info *fb_info) @@ -1686,9 +1671,7 @@ static void unbind_console(struct fb_info *fb_info) if (WARN_ON(i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)) return; - console_lock(); fbcon_fb_unbind(fb_info); - console_unlock(); } static void unlink_framebuffer(struct fb_info *fb_info) @@ -1731,9 +1714,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info) fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); } #endif - console_lock(); fbcon_fb_unregistered(fb_info); - console_unlock(); /* this may free fb info */ put_fb_info(fb_info); From 3647d6d3dbdafc55f8c4ca8225966963252abe7b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:33 +0200 Subject: [PATCH 038/219] fbcon: Move more code into fbcon_release con2fb_release_oldinfo() has a bunch more kfree() calls than fbcon_exit(), but since kfree() on NULL is harmless doing that in both places should be ok. This is also a bit more symmetric now again with fbcon_open also allocating the fbcon_ops structure. 
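The reason the extra kfree() calls are harmless on paths that never
allocated the cursor buffers is simply that kfree(NULL) is a no-op, e.g.
(illustrative snippet, not part of the diff):

    struct fbcon_ops *ops = info->fbcon_par;

    /* cursor_data may never have been allocated on this path;
     * kfree(NULL) is defined to do nothing, so this stays safe */
    kfree(ops->cursor_data);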
Acked-by: Sam Ravnborg
Signed-off-by: Daniel Vetter
Cc: Daniel Vetter
Cc: Tetsuo Handa
Cc: Greg Kroah-Hartman
Cc: Du Cheng
Cc: Claudio Suarez
Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-16-daniel.vetter@ffwll.ch
---
 drivers/video/fbdev/core/fbcon.c | 33 +++++++++++++-------------------
 1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index abb419a091c6..685b4a9e5546 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -690,6 +690,18 @@ static void fbcon_release(struct fb_info *info)
 	unlock_fb_info(info);
 
 	module_put(info->fbops->owner);
+
+	if (info->fbcon_par) {
+		struct fbcon_ops *ops = info->fbcon_par;
+
+		fbcon_del_cursor_work(info);
+		kfree(ops->cursor_state.mask);
+		kfree(ops->cursor_data);
+		kfree(ops->cursor_src);
+		kfree(ops->fontbuffer);
+		kfree(info->fbcon_par);
+		info->fbcon_par = NULL;
+	}
 }
 
 static int fbcon_open(struct fb_info *info)
@@ -740,18 +752,10 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
 static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
 				   struct fb_info *newinfo)
 {
-	struct fbcon_ops *ops = oldinfo->fbcon_par;
 	int ret;
 
 	fbcon_release(oldinfo);
 
-	fbcon_del_cursor_work(oldinfo);
-	kfree(ops->cursor_state.mask);
-	kfree(ops->cursor_data);
-	kfree(ops->cursor_src);
-	kfree(ops->fontbuffer);
-	kfree(oldinfo->fbcon_par);
-	oldinfo->fbcon_par = NULL;
 	/*
 	  If oldinfo and newinfo are driving the same hardware,
 	  the fb_release() method of oldinfo may attempt to
@@ -3315,19 +3319,8 @@ static void fbcon_exit(void)
 			}
 		}
 
-		if (mapped) {
-			if (info->fbcon_par) {
-				struct fbcon_ops *ops = info->fbcon_par;
-
-				fbcon_del_cursor_work(info);
-				kfree(ops->cursor_src);
-				kfree(ops->cursor_state.mask);
-				kfree(info->fbcon_par);
-				info->fbcon_par = NULL;
-			}
-
+		if (mapped)
 			fbcon_release(info);
-		}
 	}
 }
 

From c75300b5c64b013a6a04c8385094eeff849595ba Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Tue, 5 Apr 2022 23:03:34 +0200
Subject: [PATCH 039/219] fbcon: untangle fbcon_exit

There's a bunch of confusion going on here:

- The deferred fbcon setup notifier should only be cleaned up from
  fb_console_exit(), to be symmetric with fb_console_init()

- We also need to make sure we don't race with the work, which means
  temporarily dropping the console lock (or we can deadlock)

- That also means no point in clearing deferred_takeover, we are
  unloading everything anyway.

- Finally rename fbcon_exit to fbcon_release_all and move it, since
  that's what it's doing when being called from consw->con_deinit
  through fbcon_deinit.

To answer a question from Sam, just quoting my own reply:

> We lose the call to fbcon_release_all() here [in fb_console_exit()].
> We have part of the old fbcon_exit() above, but miss the release parts.

Ah yes, that's the entire point of this change. The release_all in the
fbcon exit path was only needed when fbcon was a separate module,
independent from core fb.ko, which meant it was possible to unload fbcon
while fbdev drivers were still registered. But since we've merged them
that has become impossible, so by the time the fb.ko module can be
unloaded, there are guaranteed to be no fbdev drivers left. And hence
removing them is pointless.
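The resulting teardown ordering in fb_console_exit() can be sketched like
this (condensed from the hunk further below; the important part is that
the console lock is dropped before cancel_work_sync(), because the
deferred takeover work itself takes the console lock):

    console_lock();
    if (deferred_takeover)
        dummycon_unregister_output_notifier(&fbcon_output_nb);
    console_unlock();

    /* must not hold console_lock here, or we deadlock against the
     * takeover work, which takes it as well */
    cancel_work_sync(&fbcon_deferred_takeover_work);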
v2: Explain the why better (Sam) Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Claudio Suarez Cc: Du Cheng Cc: Tetsuo Handa Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-17-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 63 ++++++++++++++++---------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 685b4a9e5546..944f514c77ec 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -187,7 +187,6 @@ static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); -static void fbcon_exit(void); static struct device *fbcon_device; @@ -1146,6 +1145,27 @@ static void fbcon_free_font(struct fbcon_display *p, bool freefont) static void set_vc_hi_font(struct vc_data *vc, bool set); +static void fbcon_release_all(void) +{ + struct fb_info *info; + int i, j, mapped; + + for_each_registered_fb(i) { + mapped = 0; + info = registered_fb[i]; + + for (j = first_fb_vc; j <= last_fb_vc; j++) { + if (con2fb_map[j] == i) { + mapped = 1; + con2fb_map[j] = -1; + } + } + + if (mapped) + fbcon_release(info); + } +} + static void fbcon_deinit(struct vc_data *vc) { struct fbcon_display *p = &fb_display[vc->vc_num]; @@ -1185,7 +1205,7 @@ finished: set_vc_hi_font(vc, false); if (!con_is_bound(&fb_con)) - fbcon_exit(); + fbcon_release_all(); if (vc->vc_num == logo_shown) logo_shown = FBCON_LOGO_CANSHOW; @@ -3296,34 +3316,6 @@ static void fbcon_start(void) #endif } -static void fbcon_exit(void) -{ - struct fb_info *info; - int i, j, mapped; - -#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER - if (deferred_takeover) { - dummycon_unregister_output_notifier(&fbcon_output_nb); - deferred_takeover = false; - } -#endif - - for_each_registered_fb(i) { - mapped = 0; - info = registered_fb[i]; - - for (j = first_fb_vc; j <= last_fb_vc; j++) { - if (con2fb_map[j] == i) { - mapped = 1; - con2fb_map[j] = -1; - } - } - - if (mapped) - fbcon_release(info); - } -} - void __init fb_console_init(void) { int i; @@ -3363,10 +3355,19 @@ static void __exit fbcon_deinit_device(void) void __exit fb_console_exit(void) { +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER + console_lock(); + if (deferred_takeover) + dummycon_unregister_output_notifier(&fbcon_output_nb); + console_unlock(); + + cancel_work_sync(&fbcon_deferred_takeover_work); +#endif + console_lock(); fbcon_deinit_device(); device_destroy(fb_class, MKDEV(0, 0)); - fbcon_exit(); + do_unregister_con_driver(&fb_con); console_unlock(); } From efc3acbc105af79bce2ccf97a6159255b7cf6b38 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:35 +0200 Subject: [PATCH 040/219] fbcon: Maintain a private array of fb_info Accessing the one in fbmem.c without taking the right locks is a bad idea. Instead maintain our own private copy, which is fully protected by console_lock() (like everything else in fbcon.c). That copy is serialized through fbcon_fb_registered/unregistered() calls. Also this means we do not need to hold a full fb_info reference, which is nice because doing so would mean a refcount loop between the console and the fb_info. But it's also not nice since it means console_lock() must be held absolutely everywhere. 
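As a usage sketch (illustrative only, the iterator itself is added in the
diff below), every walk over fbcon's private array is expected to look
roughly like this:

    int i;

    console_lock();
    fbcon_for_each_registered_fb(i)
        /* fbcon_registered_fb[i] cannot change or go away here: the
         * array is only updated with the console lock held, and the
         * iterator WARNs if the lock is not held */
        pr_debug("fb%d is registered\n", fbcon_registered_fb[i]->node);
    console_unlock();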
Well strictly speaking we could still try to do some refcounting games again by calling get_fb_info before we drop the console_lock. But things will get tricky. Acked-by: Sam Ravnborg Signed-off-by: Daniel Vetter Cc: Daniel Vetter Cc: Tetsuo Handa Cc: Claudio Suarez Cc: Du Cheng Cc: Greg Kroah-Hartman Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-18-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 82 +++++++++++++++++--------------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 944f514c77ec..6a7d470beec7 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -86,10 +86,6 @@ * - fbcon state itself is protected by the console_lock, and the code does a * pretty good job at making sure that lock is held everywhere it's needed. * - * - access to the registered_fb array is entirely unprotected. This should use - * proper object lifetime handling, i.e. get/put_fb_info. This also means - * switching from indices to proper pointers for fb_info everywhere. - * * - fbcon doesn't bother with fb_lock/unlock at all. This is buggy, since it * means concurrent access to the same fbdev from both fbcon and userspace * will blow up. To fix this all fbcon calls from fbmem.c need to be moved out @@ -107,6 +103,13 @@ enum { static struct fbcon_display fb_display[MAX_NR_CONSOLES]; +struct fb_info *fbcon_registered_fb[FB_MAX]; +int fbcon_num_registered_fb; + +#define fbcon_for_each_registered_fb(i) \ + for (i = 0; WARN_CONSOLE_UNLOCKED(), i < FB_MAX; i++) \ + if (!fbcon_registered_fb[i]) {} else + static signed char con2fb_map[MAX_NR_CONSOLES]; static signed char con2fb_map_boot[MAX_NR_CONSOLES]; @@ -114,12 +117,7 @@ static struct fb_info *fbcon_info_from_console(int console) { WARN_CONSOLE_UNLOCKED(); - /* - * Note that only con2fb_map is protected by the console lock, - * registered_fb is protected by a separate mutex. This lookup can - * therefore race. - */ - return registered_fb[con2fb_map[console]]; + return fbcon_registered_fb[con2fb_map[console]]; } static int logo_lines; @@ -518,7 +516,7 @@ static int do_fbcon_takeover(int show_logo) { int err, i; - if (!num_registered_fb) + if (!fbcon_num_registered_fb) return -ENODEV; if (!show_logo) @@ -821,7 +819,7 @@ static int set_con2fb_map(int unit, int newidx, int user) { struct vc_data *vc = vc_cons[unit].d; int oldidx = con2fb_map[unit]; - struct fb_info *info = registered_fb[newidx]; + struct fb_info *info = fbcon_registered_fb[newidx]; struct fb_info *oldinfo = NULL; int found, err = 0, show_logo; @@ -839,7 +837,7 @@ static int set_con2fb_map(int unit, int newidx, int user) } if (oldidx != -1) - oldinfo = registered_fb[oldidx]; + oldinfo = fbcon_registered_fb[oldidx]; found = search_fb_in_map(newidx); @@ -931,13 +929,13 @@ static const char *fbcon_startup(void) * If num_registered_fb is zero, this is a call for the dummy part. * The frame buffer devices weren't initialized yet. 
*/ - if (!num_registered_fb || info_idx == -1) + if (!fbcon_num_registered_fb || info_idx == -1) return display_desc; /* * Instead of blindly using registered_fb[0], we use info_idx, set by * fbcon_fb_registered(); */ - info = registered_fb[info_idx]; + info = fbcon_registered_fb[info_idx]; if (!info) return NULL; @@ -1150,9 +1148,9 @@ static void fbcon_release_all(void) struct fb_info *info; int i, j, mapped; - for_each_registered_fb(i) { + fbcon_for_each_registered_fb(i) { mapped = 0; - info = registered_fb[i]; + info = fbcon_registered_fb[i]; for (j = first_fb_vc; j <= last_fb_vc; j++) { if (con2fb_map[j] == i) { @@ -1179,7 +1177,7 @@ static void fbcon_deinit(struct vc_data *vc) if (idx == -1) goto finished; - info = registered_fb[idx]; + info = fbcon_registered_fb[idx]; if (!info) goto finished; @@ -2098,9 +2096,9 @@ static int fbcon_switch(struct vc_data *vc) * * info->currcon = vc->vc_num; */ - for_each_registered_fb(i) { - if (registered_fb[i]->fbcon_par) { - struct fbcon_ops *o = registered_fb[i]->fbcon_par; + fbcon_for_each_registered_fb(i) { + if (fbcon_registered_fb[i]->fbcon_par) { + struct fbcon_ops *o = fbcon_registered_fb[i]->fbcon_par; o->currcon = vc->vc_num; } @@ -2745,7 +2743,7 @@ int fbcon_mode_deleted(struct fb_info *info, j = con2fb_map[i]; if (j == -1) continue; - fb_info = registered_fb[j]; + fb_info = fbcon_registered_fb[j]; if (fb_info != info) continue; p = &fb_display[i]; @@ -2801,7 +2799,7 @@ void fbcon_fb_unbind(struct fb_info *info) set_con2fb_map(i, new_idx, 0); } } else { - struct fb_info *info = registered_fb[idx]; + struct fb_info *info = fbcon_registered_fb[idx]; /* This is sort of like set_con2fb_map, except it maps * the consoles to no device and then releases the @@ -2831,6 +2829,9 @@ void fbcon_fb_unregistered(struct fb_info *info) console_lock(); + fbcon_registered_fb[info->node] = NULL; + fbcon_num_registered_fb--; + if (deferred_takeover) { console_unlock(); return; @@ -2845,7 +2846,7 @@ void fbcon_fb_unregistered(struct fb_info *info) if (idx == info_idx) { info_idx = -1; - for_each_registered_fb(i) { + fbcon_for_each_registered_fb(i) { info_idx = i; break; } @@ -2861,7 +2862,7 @@ void fbcon_fb_unregistered(struct fb_info *info) if (primary_device == idx) primary_device = -1; - if (!num_registered_fb) + if (!fbcon_num_registered_fb) do_unregister_con_driver(&fb_con); console_unlock(); } @@ -2936,6 +2937,9 @@ int fbcon_fb_registered(struct fb_info *info) else atomic_inc(&ignore_console_lock_warning); + fbcon_registered_fb[info->node] = info; + fbcon_num_registered_fb++; + idx = info->node; fbcon_select_primary(info); @@ -3055,9 +3059,9 @@ int fbcon_set_con2fb_map_ioctl(void __user *argp) return -EINVAL; if (con2fb.framebuffer >= FB_MAX) return -EINVAL; - if (!registered_fb[con2fb.framebuffer]) + if (!fbcon_registered_fb[con2fb.framebuffer]) request_module("fb%d", con2fb.framebuffer); - if (!registered_fb[con2fb.framebuffer]) { + if (!fbcon_registered_fb[con2fb.framebuffer]) { return -EINVAL; } @@ -3124,10 +3128,10 @@ static ssize_t store_rotate(struct device *device, console_lock(); idx = con2fb_map[fg_console]; - if (idx == -1 || registered_fb[idx] == NULL) + if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; - info = registered_fb[idx]; + info = fbcon_registered_fb[idx]; rotate = simple_strtoul(buf, last, 0); fbcon_rotate(info, rotate); err: @@ -3146,10 +3150,10 @@ static ssize_t store_rotate_all(struct device *device, console_lock(); idx = con2fb_map[fg_console]; - if (idx == -1 || registered_fb[idx] == NULL) + if (idx == -1 || 
fbcon_registered_fb[idx] == NULL) goto err; - info = registered_fb[idx]; + info = fbcon_registered_fb[idx]; rotate = simple_strtoul(buf, last, 0); fbcon_rotate_all(info, rotate); err: @@ -3166,10 +3170,10 @@ static ssize_t show_rotate(struct device *device, console_lock(); idx = con2fb_map[fg_console]; - if (idx == -1 || registered_fb[idx] == NULL) + if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; - info = registered_fb[idx]; + info = fbcon_registered_fb[idx]; rotate = fbcon_get_rotate(info); err: console_unlock(); @@ -3186,10 +3190,10 @@ static ssize_t show_cursor_blink(struct device *device, console_lock(); idx = con2fb_map[fg_console]; - if (idx == -1 || registered_fb[idx] == NULL) + if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; - info = registered_fb[idx]; + info = fbcon_registered_fb[idx]; ops = info->fbcon_par; if (!ops) @@ -3212,10 +3216,10 @@ static ssize_t store_cursor_blink(struct device *device, console_lock(); idx = con2fb_map[fg_console]; - if (idx == -1 || registered_fb[idx] == NULL) + if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; - info = registered_fb[idx]; + info = fbcon_registered_fb[idx]; if (!info->fbcon_par) goto err; @@ -3275,8 +3279,8 @@ static void fbcon_register_existing_fbs(struct work_struct *work) deferred_takeover = false; logo_shown = FBCON_LOGO_DONTSHOW; - for_each_registered_fb(i) - fbcon_fb_registered(registered_fb[i]); + fbcon_for_each_registered_fb(i) + fbcon_fb_registered(fbcon_registered_fb[i]); console_unlock(); } From 17b048d4c6143ac9ca8a6952da7a37416bf135b0 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 7 Apr 2022 17:45:31 +0100 Subject: [PATCH 041/219] drm/i915: fix broken build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I guess this was missed in the conversion or something. 
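For context (the hunk below is the whole fix): the dma_resv_usage
conversion changed the iterator to take an enum dma_resv_usage instead of
the old "all fences" bool, so a caller that used to pass true now
typically goes through dma_resv_usage_rw(), roughly:

    struct dma_resv_iter iter;
    struct dma_fence *fence;

    /* old form: dma_resv_for_each_fence(&iter, resv, true, fence) */
    dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {
        /* handle each fence */
    }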
Fixes: 7bc80a5462c3 ("dma-buf: add enum dma_resv_usage v4") Signed-off-by: Matthew Auld Reviewed-by: Christian König Cc: Christian König Cc: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407164532.1242578-1-matthew.auld@intel.com Signed-off-by: Christian König --- drivers/gpu/drm/i915/i915_deps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c index 999210b37325..297b8e4e42ee 100644 --- a/drivers/gpu/drm/i915/i915_deps.c +++ b/drivers/gpu/drm/i915/i915_deps.c @@ -226,7 +226,7 @@ int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv, struct dma_fence *fence; dma_resv_assert_held(resv); - dma_resv_for_each_fence(&iter, resv, true, fence) { + dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) { int ret = i915_deps_add_dependency(deps, fence, ctx); if (ret) From c9cad937c0c58618fe5b0310fd539a854dc1ae95 Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Fri, 8 Apr 2022 04:18:43 +0530 Subject: [PATCH 042/219] drm/amdgpu: add drm buddy support to amdgpu MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Switch to drm buddy allocator - Add resource cursor support for drm buddy v2(Matthew Auld): - replace spinlock with mutex as we call kmem_cache_zalloc (..., GFP_KERNEL) in drm_buddy_alloc() function - lock drm_buddy_block_trim() function as it calls mark_free/mark_split are all globally visible v3(Matthew Auld): - remove trim method error handling as we address the failure case at drm_buddy_block_trim() function v4: - fix warnings reported by kernel test robot v5: - fix merge conflict issue v6: - fix warnings reported by kernel test robot v7: - remove DRM_BUDDY_RANGE_ALLOCATION flag usage v8: - keep DRM_BUDDY_RANGE_ALLOCATION flag usage - resolve conflicts created by drm/amdgpu: remove VRAM accounting v2 v9(Christian): - merged the below patch - drm/amdgpu: move vram inline functions into a header - rename label name as fallback - move struct amdgpu_vram_mgr to amdgpu_vram_mgr.h - remove unnecessary flags from struct amdgpu_vram_reservation - rewrite block NULL check condition - change else style as per coding standard - rewrite the node max size - add a helper function to fetch the first entry from the list v10(Christian): - rename amdgpu_get_node() function name as amdgpu_vram_mgr_first_block v11: - if size is not aligned with min_page_size, enable is_contiguous flag, therefore, the size round up to the power of two and trimmed to the original size. 
v12: - rename the function names having prefix as amdgpu_vram_mgr_*() - modify the round_up() logic conforming to contiguous flag enablement or if size is not aligned to min_block_size - modify the trim logic - rename node as block wherever applicable Signed-off-by: Arunpravin Paneer Selvam Acked-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20220407224843.2416-1-Arunpravin.PaneerSelvam@amd.com Signed-off-by: Christian König --- drivers/gpu/drm/Kconfig | 1 + .../gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 97 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 349 +++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 89 +++++ 5 files changed, 375 insertions(+), 171 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index f1422bee3dcc..5133c3f028ab 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -280,6 +280,7 @@ config DRM_AMDGPU select HWMON select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE + select DRM_BUDDY help Choose this option if you have a recent AMD Radeon graphics card. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index acfa207cf970..6546552e596c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -30,12 +30,15 @@ #include #include +#include "amdgpu_vram_mgr.h" + /* state back for walking over vram_mgr and gtt_mgr allocations */ struct amdgpu_res_cursor { uint64_t start; uint64_t size; uint64_t remaining; - struct drm_mm_node *node; + void *node; + uint32_t mem_type; }; /** @@ -52,27 +55,63 @@ static inline void amdgpu_res_first(struct ttm_resource *res, uint64_t start, uint64_t size, struct amdgpu_res_cursor *cur) { + struct drm_buddy_block *block; + struct list_head *head, *next; struct drm_mm_node *node; - if (!res || res->mem_type == TTM_PL_SYSTEM) { - cur->start = start; - cur->size = size; - cur->remaining = size; - cur->node = NULL; - WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); - return; - } + if (!res) + goto fallback; BUG_ON(start + size > res->num_pages << PAGE_SHIFT); - node = to_ttm_range_mgr_node(res)->mm_nodes; - while (start >= node->size << PAGE_SHIFT) - start -= node++->size << PAGE_SHIFT; + cur->mem_type = res->mem_type; - cur->start = (node->start << PAGE_SHIFT) + start; - cur->size = min((node->size << PAGE_SHIFT) - start, size); + switch (cur->mem_type) { + case TTM_PL_VRAM: + head = &to_amdgpu_vram_mgr_resource(res)->blocks; + + block = list_first_entry_or_null(head, + struct drm_buddy_block, + link); + if (!block) + goto fallback; + + while (start >= amdgpu_vram_mgr_block_size(block)) { + start -= amdgpu_vram_mgr_block_size(block); + + next = block->link.next; + if (next != head) + block = list_entry(next, struct drm_buddy_block, link); + } + + cur->start = amdgpu_vram_mgr_block_start(block) + start; + cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size); + cur->remaining = size; + cur->node = block; + break; + case TTM_PL_TT: + node = to_ttm_range_mgr_node(res)->mm_nodes; + while (start >= node->size << PAGE_SHIFT) + start -= node++->size << PAGE_SHIFT; + + cur->start = (node->start << PAGE_SHIFT) + start; + cur->size = min((node->size << PAGE_SHIFT) - start, size); + cur->remaining = size; + cur->node = node; + break; + default: + goto fallback; + } + + return; + +fallback: + cur->start = start; + cur->size = 
size; cur->remaining = size; - cur->node = node; + cur->node = NULL; + WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); + return; } /** @@ -85,7 +124,9 @@ static inline void amdgpu_res_first(struct ttm_resource *res, */ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) { - struct drm_mm_node *node = cur->node; + struct drm_buddy_block *block; + struct drm_mm_node *node; + struct list_head *next; BUG_ON(size > cur->remaining); @@ -99,9 +140,27 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) return; } - cur->node = ++node; - cur->start = node->start << PAGE_SHIFT; - cur->size = min(node->size << PAGE_SHIFT, cur->remaining); + switch (cur->mem_type) { + case TTM_PL_VRAM: + block = cur->node; + + next = block->link.next; + block = list_entry(next, struct drm_buddy_block, link); + + cur->node = block; + cur->start = amdgpu_vram_mgr_block_start(block); + cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining); + break; + case TTM_PL_TT: + node = cur->node; + + cur->node = ++node; + cur->start = node->start << PAGE_SHIFT; + cur->size = min(node->size << PAGE_SHIFT, cur->remaining); + break; + default: + return; + } } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 9120ae80ef52..6a70818039dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -26,6 +26,7 @@ #include #include +#include "amdgpu_vram_mgr.h" #include "amdgpu.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) @@ -38,15 +39,6 @@ #define AMDGPU_POISON 0xd0bed0be -struct amdgpu_vram_mgr { - struct ttm_resource_manager manager; - struct drm_mm mm; - spinlock_t lock; - struct list_head reservations_pending; - struct list_head reserved_pages; - atomic64_t vis_usage; -}; - struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 0a7611648573..49e4092f447f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -32,8 +32,10 @@ #include "atom.h" struct amdgpu_vram_reservation { - struct list_head node; - struct drm_mm_node mm_node; + u64 start; + u64 size; + struct list_head allocated; + struct list_head blocks; }; static inline struct amdgpu_vram_mgr * @@ -186,18 +188,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = { }; /** - * amdgpu_vram_mgr_vis_size - Calculate visible node size + * amdgpu_vram_mgr_vis_size - Calculate visible block size * * @adev: amdgpu_device pointer - * @node: MM node structure + * @block: DRM BUDDY block structure * - * Calculate how many bytes of the MM node are inside visible VRAM + * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM */ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, - struct drm_mm_node *node) + struct drm_buddy_block *block) { - uint64_t start = node->start << PAGE_SHIFT; - uint64_t end = (node->size + node->start) << PAGE_SHIFT; + u64 start = amdgpu_vram_mgr_block_start(block); + u64 end = start + amdgpu_vram_mgr_block_size(block); if (start >= adev->gmc.visible_vram_size) return 0; @@ -218,9 +220,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res = bo->tbo.resource; - unsigned pages = res->num_pages; - struct drm_mm_node *mm; - u64 usage; + struct amdgpu_vram_mgr_resource 
*vres = to_amdgpu_vram_mgr_resource(res); + struct drm_buddy_block *block; + u64 usage = 0; if (amdgpu_gmc_vram_full_visible(&adev->gmc)) return amdgpu_bo_size(bo); @@ -228,9 +230,8 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) return 0; - mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0]; - for (usage = 0; pages; pages -= mm->size, mm++) - usage += amdgpu_vram_mgr_vis_size(adev, mm); + list_for_each_entry(block, &vres->blocks, link) + usage += amdgpu_vram_mgr_vis_size(adev, block); return usage; } @@ -240,23 +241,30 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct drm_mm *mm = &mgr->mm; + struct drm_buddy *mm = &mgr->mm; struct amdgpu_vram_reservation *rsv, *temp; + struct drm_buddy_block *block; uint64_t vis_usage; - list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) { - if (drm_mm_reserve_node(mm, &rsv->mm_node)) + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { + if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, + rsv->size, mm->chunk_size, &rsv->allocated, + DRM_BUDDY_RANGE_ALLOCATION)) + continue; + + block = amdgpu_vram_mgr_first_block(&rsv->allocated); + if (!block) continue; dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n", - rsv->mm_node.start, rsv->mm_node.size); + rsv->start, rsv->size); - vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node); + vis_usage = amdgpu_vram_mgr_vis_size(adev, block); atomic64_add(vis_usage, &mgr->vis_usage); spin_lock(&man->bdev->lru_lock); - man->usage += rsv->mm_node.size << PAGE_SHIFT; + man->usage += rsv->size; spin_unlock(&man->bdev->lru_lock); - list_move(&rsv->node, &mgr->reserved_pages); + list_move(&rsv->blocks, &mgr->reserved_pages); } } @@ -278,14 +286,16 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, if (!rsv) return -ENOMEM; - INIT_LIST_HEAD(&rsv->node); - rsv->mm_node.start = start >> PAGE_SHIFT; - rsv->mm_node.size = size >> PAGE_SHIFT; + INIT_LIST_HEAD(&rsv->allocated); + INIT_LIST_HEAD(&rsv->blocks); - spin_lock(&mgr->lock); - list_add_tail(&rsv->node, &mgr->reservations_pending); + rsv->start = start; + rsv->size = size; + + mutex_lock(&mgr->lock); + list_add_tail(&rsv->blocks, &mgr->reservations_pending); amdgpu_vram_mgr_do_reserve(&mgr->manager); - spin_unlock(&mgr->lock); + mutex_unlock(&mgr->lock); return 0; } @@ -307,19 +317,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, struct amdgpu_vram_reservation *rsv; int ret; - spin_lock(&mgr->lock); + mutex_lock(&mgr->lock); - list_for_each_entry(rsv, &mgr->reservations_pending, node) { - if ((rsv->mm_node.start <= start) && - (start < (rsv->mm_node.start + rsv->mm_node.size))) { + list_for_each_entry(rsv, &mgr->reservations_pending, blocks) { + if (rsv->start <= start && + (start < (rsv->start + rsv->size))) { ret = -EBUSY; goto out; } } - list_for_each_entry(rsv, &mgr->reserved_pages, node) { - if ((rsv->mm_node.start <= start) && - (start < (rsv->mm_node.start + rsv->mm_node.size))) { + list_for_each_entry(rsv, &mgr->reserved_pages, blocks) { + if (rsv->start <= start && + (start < (rsv->start + rsv->size))) { ret = 0; goto out; } @@ -327,32 +337,10 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, ret = -ENOENT; out: - spin_unlock(&mgr->lock); + mutex_unlock(&mgr->lock); return ret; } -/** - * 
amdgpu_vram_mgr_virt_start - update virtual start address - * - * @mem: ttm_resource to update - * @node: just allocated node - * - * Calculate a virtual BO start address to easily check if everything is CPU - * accessible. - */ -static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, - struct drm_mm_node *node) -{ - unsigned long start; - - start = node->start + node->size; - if (start > mem->num_pages) - start -= mem->num_pages; - else - start = 0; - mem->start = max(mem->start, start); -} - /** * amdgpu_vram_mgr_new - allocate new ranges * @@ -368,46 +356,44 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_resource **res) { - unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages; + u64 vis_usage = 0, max_bytes, cur_size, min_block_size; struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - uint64_t vis_usage = 0, mem_bytes, max_bytes; - struct ttm_range_mgr_node *node; - struct drm_mm *mm = &mgr->mm; - enum drm_mm_insert_mode mode; - unsigned i; + struct amdgpu_vram_mgr_resource *vres; + u64 size, remaining_size, lpfn, fpfn; + struct drm_buddy *mm = &mgr->mm; + struct drm_buddy_block *block; + unsigned long pages_per_block; int r; - lpfn = place->lpfn; + lpfn = place->lpfn << PAGE_SHIFT; if (!lpfn) - lpfn = man->size >> PAGE_SHIFT; + lpfn = man->size; + + fpfn = place->fpfn << PAGE_SHIFT; max_bytes = adev->gmc.mc_vram_size; if (tbo->type != ttm_bo_type_kernel) max_bytes -= AMDGPU_VM_RESERVED_VRAM; - mem_bytes = tbo->base.size; if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - pages_per_node = ~0ul; - num_nodes = 1; + pages_per_block = ~0ul; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - pages_per_node = HPAGE_PMD_NR; + pages_per_block = HPAGE_PMD_NR; #else /* default to 2MB */ - pages_per_node = 2UL << (20UL - PAGE_SHIFT); + pages_per_block = 2UL << (20UL - PAGE_SHIFT); #endif - pages_per_node = max_t(uint32_t, pages_per_node, - tbo->page_alignment); - num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node); + pages_per_block = max_t(uint32_t, pages_per_block, + tbo->page_alignment); } - node = kvmalloc(struct_size(node, mm_nodes, num_nodes), - GFP_KERNEL | __GFP_ZERO); - if (!node) + vres = kzalloc(sizeof(*vres), GFP_KERNEL); + if (!vres) return -ENOMEM; - ttm_resource_init(tbo, place, &node->base); + ttm_resource_init(tbo, place, &vres->base); /* bail out quickly if there's likely not enough VRAM for this BO */ if (ttm_resource_manager_usage(man) > max_bytes) { @@ -415,66 +401,130 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, goto error_fini; } - mode = DRM_MM_INSERT_BEST; + INIT_LIST_HEAD(&vres->blocks); + if (place->flags & TTM_PL_FLAG_TOPDOWN) - mode = DRM_MM_INSERT_HIGH; + vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; - pages_left = node->base.num_pages; + if (fpfn || lpfn != man->size) + /* Allocate blocks in desired range */ + vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; - /* Limit maximum size to 2GB due to SG table limitations */ - pages = min(pages_left, 2UL << (30 - PAGE_SHIFT)); + remaining_size = vres->base.num_pages << PAGE_SHIFT; - i = 0; - spin_lock(&mgr->lock); - while (pages_left) { - uint32_t alignment = tbo->page_alignment; + mutex_lock(&mgr->lock); + while (remaining_size) { + if (tbo->page_alignment) + min_block_size = tbo->page_alignment << PAGE_SHIFT; + else + min_block_size = mgr->default_page_size; - if (pages >= pages_per_node) - alignment = pages_per_node; + BUG_ON(min_block_size < mm->chunk_size); - r = 
drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages, - alignment, 0, place->fpfn, - lpfn, mode); - if (unlikely(r)) { - if (pages > pages_per_node) { - if (is_power_of_2(pages)) - pages = pages / 2; - else - pages = rounddown_pow_of_two(pages); - continue; + /* Limit maximum size to 2GiB due to SG table limitations */ + size = min(remaining_size, 2ULL << 30); + + if (size >= pages_per_block << PAGE_SHIFT) + min_block_size = pages_per_block << PAGE_SHIFT; + + cur_size = size; + + if (fpfn + size != place->lpfn << PAGE_SHIFT) { + /* + * Except for actual range allocation, modify the size and + * min_block_size conforming to continuous flag enablement + */ + if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { + size = roundup_pow_of_two(size); + min_block_size = size; + /* + * Modify the size value if size is not + * aligned with min_block_size + */ + } else if (!IS_ALIGNED(size, min_block_size)) { + size = round_up(size, min_block_size); } - goto error_free; } - vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]); - amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]); - pages_left -= pages; - ++i; + r = drm_buddy_alloc_blocks(mm, fpfn, + lpfn, + size, + min_block_size, + &vres->blocks, + vres->flags); + if (unlikely(r)) + goto error_free_blocks; - if (pages > pages_left) - pages = pages_left; + if (size > remaining_size) + remaining_size = 0; + else + remaining_size -= size; } - spin_unlock(&mgr->lock); + mutex_unlock(&mgr->lock); - if (i == 1) - node->base.placement |= TTM_PL_FLAG_CONTIGUOUS; + if (cur_size != size) { + struct drm_buddy_block *block; + struct list_head *trim_list; + u64 original_size; + LIST_HEAD(temp); + + trim_list = &vres->blocks; + original_size = vres->base.num_pages << PAGE_SHIFT; + + /* + * If size value is rounded up to min_block_size, trim the last + * block to the required size + */ + if (!list_is_singular(&vres->blocks)) { + block = list_last_entry(&vres->blocks, typeof(*block), link); + list_move_tail(&block->link, &temp); + trim_list = &temp; + /* + * Compute the original_size value by subtracting the + * last block size with (aligned size - original size) + */ + original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size); + } + + mutex_lock(&mgr->lock); + drm_buddy_block_trim(mm, + original_size, + trim_list); + mutex_unlock(&mgr->lock); + + if (!list_empty(&temp)) + list_splice_tail(trim_list, &vres->blocks); + } + + list_for_each_entry(block, &vres->blocks, link) + vis_usage += amdgpu_vram_mgr_vis_size(adev, block); + + block = amdgpu_vram_mgr_first_block(&vres->blocks); + if (!block) { + r = -EINVAL; + goto error_fini; + } + + vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT; + + if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks)) + vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS; if (adev->gmc.xgmi.connected_to_cpu) - node->base.bus.caching = ttm_cached; + vres->base.bus.caching = ttm_cached; else - node->base.bus.caching = ttm_write_combined; + vres->base.bus.caching = ttm_write_combined; atomic64_add(vis_usage, &mgr->vis_usage); - *res = &node->base; + *res = &vres->base; return 0; -error_free: - while (i--) - drm_mm_remove_node(&node->mm_nodes[i]); - spin_unlock(&mgr->lock); +error_free_blocks: + drm_buddy_free_list(mm, &vres->blocks); + mutex_unlock(&mgr->lock); error_fini: - ttm_resource_fini(man, &node->base); - kvfree(node); + ttm_resource_fini(man, &vres->base); + kfree(vres); return r; } @@ -490,27 +540,26 @@ error_fini: static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct 
ttm_resource *res) { - struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); + struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); + struct drm_buddy *mm = &mgr->mm; + struct drm_buddy_block *block; uint64_t vis_usage = 0; - unsigned i, pages; - spin_lock(&mgr->lock); - for (i = 0, pages = res->num_pages; pages; - pages -= node->mm_nodes[i].size, ++i) { - struct drm_mm_node *mm = &node->mm_nodes[i]; + mutex_lock(&mgr->lock); + list_for_each_entry(block, &vres->blocks, link) + vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - drm_mm_remove_node(mm); - vis_usage += amdgpu_vram_mgr_vis_size(adev, mm); - } amdgpu_vram_mgr_do_reserve(man); - spin_unlock(&mgr->lock); + + drm_buddy_free_list(mm, &vres->blocks); + mutex_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); ttm_resource_fini(man, res); - kvfree(node); + kfree(vres); } /** @@ -542,7 +591,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, if (!*sgt) return -ENOMEM; - /* Determine the number of DRM_MM nodes to export */ + /* Determine the number of DRM_BUDDY blocks to export */ amdgpu_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; @@ -558,10 +607,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, sg->length = 0; /* - * Walk down DRM_MM nodes to populate scatterlist nodes - * @note: Use iterator api to get first the DRM_MM node + * Walk down DRM_BUDDY blocks to populate scatterlist nodes + * @note: Use iterator api to get first the DRM_BUDDY block * and the number of bytes from it. Access the following - * DRM_MM node(s) if more buffer needs to exported + * DRM_BUDDY block(s) if more buffer needs to exported */ amdgpu_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { @@ -648,13 +697,22 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + struct drm_buddy *mm = &mgr->mm; + struct drm_buddy_block *block; drm_printf(printer, " vis usage:%llu\n", amdgpu_vram_mgr_vis_usage(mgr)); - spin_lock(&mgr->lock); - drm_mm_print(&mgr->mm, printer); - spin_unlock(&mgr->lock); + mutex_lock(&mgr->lock); + drm_printf(printer, "default_page_size: %lluKiB\n", + mgr->default_page_size >> 10); + + drm_buddy_print(mm, printer); + + drm_printf(printer, "reserved:\n"); + list_for_each_entry(block, &mgr->reserved_pages, link) + drm_buddy_block_print(mm, block, printer); + mutex_unlock(&mgr->lock); } static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { @@ -674,16 +732,21 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) { struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; struct ttm_resource_manager *man = &mgr->manager; + int err; ttm_resource_manager_init(man, &adev->mman.bdev, adev->gmc.real_vram_size); man->func = &amdgpu_vram_mgr_func; - drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT); - spin_lock_init(&mgr->lock); + err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); + if (err) + return err; + + mutex_init(&mgr->lock); INIT_LIST_HEAD(&mgr->reservations_pending); INIT_LIST_HEAD(&mgr->reserved_pages); + mgr->default_page_size = PAGE_SIZE; ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); ttm_resource_manager_set_used(man, true); @@ -711,16 +774,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) if (ret) return; - spin_lock(&mgr->lock); - list_for_each_entry_safe(rsv, temp, 
&mgr->reservations_pending, node) + mutex_lock(&mgr->lock); + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) kfree(rsv); - list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) { - drm_mm_remove_node(&rsv->mm_node); + list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { + drm_buddy_free_list(&mgr->mm, &rsv->blocks); kfree(rsv); } - drm_mm_takedown(&mgr->mm); - spin_unlock(&mgr->lock); + drm_buddy_fini(&mgr->mm); + mutex_unlock(&mgr->lock); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h new file mode 100644 index 000000000000..9a2db87186c7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: MIT + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_VRAM_MGR_H__ +#define __AMDGPU_VRAM_MGR_H__ + +#include + +struct amdgpu_vram_mgr { + struct ttm_resource_manager manager; + struct drm_buddy mm; + /* protects access to buffer objects */ + struct mutex lock; + struct list_head reservations_pending; + struct list_head reserved_pages; + atomic64_t vis_usage; + u64 default_page_size; +}; + +struct amdgpu_vram_mgr_resource { + struct ttm_resource base; + struct list_head blocks; + unsigned long flags; +}; + +static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block) +{ + return drm_buddy_block_offset(block); +} + +static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) +{ + return PAGE_SIZE << drm_buddy_block_order(block); +} + +static inline struct drm_buddy_block * +amdgpu_vram_mgr_first_block(struct list_head *list) +{ + return list_first_entry_or_null(list, struct drm_buddy_block, link); +} + +static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) +{ + struct drm_buddy_block *block; + u64 start, size; + + block = amdgpu_vram_mgr_first_block(head); + if (!block) + return false; + + while (head != block->link.next) { + start = amdgpu_vram_mgr_block_start(block); + size = amdgpu_vram_mgr_block_size(block); + + block = list_entry(block->link.next, struct drm_buddy_block, link); + if (start + size != amdgpu_vram_mgr_block_start(block)) + return false; + } + + return true; +} + +static inline struct amdgpu_vram_mgr_resource * +to_amdgpu_vram_mgr_resource(struct ttm_resource *res) +{ + return container_of(res, struct amdgpu_vram_mgr_resource, base); +} + +#endif From 9362a07a0c5d6e566d614e988bc9c96102774a9d Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Fri, 8 Apr 2022 09:42:05 +0100 Subject: [PATCH 043/219] drm/i915: fix i915_gem_object_wait_moving_fence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All of CI is just failing with the following, which prevents loading of the module: i915 0000:03:00.0: [drm] *ERROR* Scratch setup failed Best guess is that this comes from the pin_map() for the scratch page, which does an i915_gem_object_wait_moving_fence() somewhere. It looks like this now calls into dma_resv_wait_timeout() which can return the remaining timeout, leading to the caller thinking this is an error. v2(Lucas): handle ret == 0 Fixes: 1d7f5e6c5240 ("drm/i915: drop bo->moving dependency") Signed-off-by: Matthew Auld Cc: Christian König Cc: Lucas De Marchi Cc: Daniel Vetter Reviewed-by: Christian König #v1 Link: https://patchwork.freedesktop.org/patch/msgid/20220408084205.1353427-1-matthew.auld@intel.com Signed-off-by: Christian König --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index ffde7bc0a95d..1c103c4dfaf4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -771,9 +771,16 @@ int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj, int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, bool intr) { + long ret; + assert_object_held(obj); - return dma_resv_wait_timeout(obj->base. resv, DMA_RESV_USAGE_KERNEL, - intr, MAX_SCHEDULE_TIMEOUT); + + ret = dma_resv_wait_timeout(obj->base. resv, DMA_RESV_USAGE_KERNEL, + intr, MAX_SCHEDULE_TIMEOUT); + if (!ret) + ret = -ETIME; + + return ret < 0 ? 
ret : 0; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) From 8514e6b1f40319e31ac4aa3fbf606796786366c9 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 28 Mar 2022 17:36:54 +0200 Subject: [PATCH 044/219] drm/vc4: hvs: Reset muxes at probe time By default, the HVS driver will force the HVS output 3 to be muxed to the HVS channel 2. However, the Transposer can only be assigned to the HVS channel 2, so whenever we try to use the writeback connector, we'll mux its associated output (Output 2) to the channel 2. This leads to both the output 2 and 3 feeding from the same channel, which is explicitly discouraged in the documentation. In order to avoid this, let's reset all the output muxes to their reset value. Fixes: 87ebcd42fb7b ("drm/vc4: crtc: Assign output to channel automatically") Signed-off-by: Maxime Ripard Acked-by: Thomas Zimmermann Link: https://lore.kernel.org/r/20220328153659.2382206-2-maxime@cerno.tech --- drivers/gpu/drm/vc4/vc4_hvs.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 9194cb52e706..2a58fc421cf6 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -611,6 +611,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) struct vc4_hvs *hvs = NULL; int ret; u32 dispctrl; + u32 reg; hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL); if (!hvs) @@ -682,6 +683,26 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) vc4->hvs = hvs; + reg = HVS_READ(SCALER_DISPECTRL); + reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK; + HVS_WRITE(SCALER_DISPECTRL, + reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX)); + + reg = HVS_READ(SCALER_DISPCTRL); + reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; + HVS_WRITE(SCALER_DISPCTRL, + reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX)); + + reg = HVS_READ(SCALER_DISPEOLN); + reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK; + HVS_WRITE(SCALER_DISPEOLN, + reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX)); + + reg = HVS_READ(SCALER_DISPDITHER); + reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK; + HVS_WRITE(SCALER_DISPDITHER, + reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX)); + dispctrl = HVS_READ(SCALER_DISPCTRL); dispctrl |= SCALER_DISPCTRL_ENABLE; @@ -689,10 +710,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) SCALER_DISPCTRL_DISPEIRQ(1) | SCALER_DISPCTRL_DISPEIRQ(2); - /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise - * be unused. - */ - dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | SCALER_DISPCTRL_SLVWREIRQ | SCALER_DISPCTRL_SLVRDEIRQ | @@ -706,7 +723,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) SCALER_DISPCTRL_DSPEISLUR(1) | SCALER_DISPCTRL_DSPEISLUR(2) | SCALER_DISPCTRL_SCLEIRQ); - dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); HVS_WRITE(SCALER_DISPCTRL, dispctrl); From 234998df929f14d00cbf2f1e81a7facb69fd9266 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 28 Mar 2022 17:36:55 +0200 Subject: [PATCH 045/219] drm/vc4: txp: Don't set TXP_VSTART_AT_EOF The TXP_VSTART_AT_EOF will generate a second VSTART signal to the HVS. However, the HVS waits for VSTART to enable the FIFO and will thus start filling the FIFO before the start of the frame. This leads to corruption at the beginning of the first frame, and content from the previous frame at the beginning of the next frames. Since one VSTART is enough, let's get rid of it. 
Fixes: 008095e065a8 ("drm/vc4: Add support for the transposer block") Signed-off-by: Maxime Ripard Acked-by: Thomas Zimmermann Link: https://lore.kernel.org/r/20220328153659.2382206-3-maxime@cerno.tech --- drivers/gpu/drm/vc4/vc4_txp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index 9809ca3e2945..ace2d03649ba 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -298,7 +298,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, if (WARN_ON(i == ARRAY_SIZE(drm_fmts))) return; - ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI | + ctrl = TXP_GO | TXP_EI | VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) | VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT); From 5453343a88ede8b12812fced81ecd24cb888ccc3 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 28 Mar 2022 17:36:56 +0200 Subject: [PATCH 046/219] drm/vc4: txp: Force alpha to be 0xff if it's disabled If we use a format that has padding instead of the alpha component (such as XRGB8888), it appears that the Transposer will fill the padding to 0, disregarding what was stored in the input buffer padding. This leads to issues with IGT, since it will set the padding to 0xff, but will then compare the CRC of the two frames which will thus fail. Another nice side effect is that it is now possible to just use the buffer as ARGB. Fixes: 008095e065a8 ("drm/vc4: Add support for the transposer block") Signed-off-by: Maxime Ripard Acked-by: Thomas Zimmermann Link: https://lore.kernel.org/r/20220328153659.2382206-4-maxime@cerno.tech --- drivers/gpu/drm/vc4/vc4_txp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index ace2d03649ba..82beb8c159f2 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -304,6 +304,12 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, if (fb->format->has_alpha) ctrl |= TXP_ALPHA_ENABLE; + else + /* + * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the + * hardware will force the output padding to be 0xff. + */ + ctrl |= TXP_ALPHA_INVERT; gem = drm_fb_cma_get_gem_obj(fb, 0); TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]); From f47d37a91e60990f0ee3006004639487906e3125 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 28 Mar 2022 17:36:57 +0200 Subject: [PATCH 047/219] drm/vc4: kms: Store channel in local variable We use the channel from our vc4_crtc_state structure in multiple places, let's store it in a local variable to make it cleaner. Signed-off-by: Maxime Ripard Acked-by: Thomas Zimmermann Link: https://lore.kernel.org/r/20220328153659.2382206-5-maxime@cerno.tech --- drivers/gpu/drm/vc4/vc4_kms.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 747a2d199eca..0c6a5197d376 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -283,13 +283,14 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + unsigned int channel = vc4_state->assigned_channel; if (!vc4_state->update_muxing) continue; switch (vc4_crtc->data->hvs_output) { case 2: - mux = (vc4_state->assigned_channel == 2) ? 0 : 1; + mux = (channel == 2) ? 
0 : 1; reg = HVS_READ(SCALER_DISPECTRL); HVS_WRITE(SCALER_DISPECTRL, (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | @@ -297,10 +298,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, break; case 3: - if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) + if (channel == VC4_HVS_CHANNEL_DISABLED) mux = 3; else - mux = vc4_state->assigned_channel; + mux = channel; reg = HVS_READ(SCALER_DISPCTRL); HVS_WRITE(SCALER_DISPCTRL, @@ -309,10 +310,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, break; case 4: - if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) + if (channel == VC4_HVS_CHANNEL_DISABLED) mux = 3; else - mux = vc4_state->assigned_channel; + mux = channel; reg = HVS_READ(SCALER_DISPEOLN); HVS_WRITE(SCALER_DISPEOLN, @@ -322,10 +323,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, break; case 5: - if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) + if (channel == VC4_HVS_CHANNEL_DISABLED) mux = 3; else - mux = vc4_state->assigned_channel; + mux = channel; reg = HVS_READ(SCALER_DISPDITHER); HVS_WRITE(SCALER_DISPDITHER, From 457e5184b9e0e14b26044c7fed5940bcbe7698a7 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 28 Mar 2022 17:36:58 +0200 Subject: [PATCH 048/219] drm/vc4: kms: Warn if we have an incompatible muxing setup The documentation explicitly states we must prevent the output 2 and 3 from feeding from the same HVS channel. Let's add a warning to make some noise if we ever find ourselves in such a case. Signed-off-by: Maxime Ripard Acked-by: Thomas Zimmermann Link: https://lore.kernel.org/r/20220328153659.2382206-6-maxime@cerno.tech --- drivers/gpu/drm/vc4/vc4_kms.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 0c6a5197d376..ffd54aab7759 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -290,6 +290,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, switch (vc4_crtc->data->hvs_output) { case 2: + drm_WARN_ON(&vc4->base, + VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL), + SCALER_DISPCTRL_DSP3_MUX) == channel); + mux = (channel == 2) ? 0 : 1; reg = HVS_READ(SCALER_DISPECTRL); HVS_WRITE(SCALER_DISPECTRL, From 3870b54e0684a17ac95ae7ec8fffbcb6357731ea Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 28 Mar 2022 17:36:59 +0200 Subject: [PATCH 049/219] drm/vc4: kms: Improve logging When debugging, finding out what muxing decisions were made and what the actual core clock rate is is always useful, so let's add some more messages. Signed-off-by: Maxime Ripard Acked-by: Thomas Zimmermann Link: https://lore.kernel.org/r/20220328153659.2382206-7-maxime@cerno.tech --- drivers/gpu/drm/vc4/vc4_kms.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index ffd54aab7759..c169bd72e53b 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -439,6 +439,9 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) * requirements. 
*/ clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate); + + drm_dbg(dev, "Core clock actual rate: %lu Hz\n", + clk_get_rate(hvs->core_clk)); } } @@ -822,9 +825,18 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, unsigned int matching_channels; unsigned int channel; + drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name); + /* Nothing to do here, let's skip it */ - if (old_crtc_state->enable == new_crtc_state->enable) + if (old_crtc_state->enable == new_crtc_state->enable) { + if (new_crtc_state->enable) + drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n", + crtc->name, new_vc4_crtc_state->assigned_channel); + else + drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name); + continue; + } /* Muxing will need to be modified, mark it as such */ new_vc4_crtc_state->update_muxing = true; @@ -832,6 +844,10 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, /* If we're disabling our CRTC, we put back our channel */ if (!new_crtc_state->enable) { channel = old_vc4_crtc_state->assigned_channel; + + drm_dbg(dev, "%s: Disabling, Freeing channel %d\n", + crtc->name, channel); + hvs_new_state->fifo_state[channel].in_use = false; new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; continue; @@ -866,6 +882,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, return -EINVAL; channel = ffs(matching_channels) - 1; + + drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name); new_vc4_crtc_state->assigned_channel = channel; unassigned_channels &= ~BIT(channel); hvs_new_state->fifo_state[channel].in_use = true; From 5f77876013d08fe9d43bb4b7f9f7a81e4d3b63a9 Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Mon, 11 Apr 2022 13:08:34 +0530 Subject: [PATCH 050/219] drm: add a check to verify the size alignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a simple check to reject any size not aligned to the min_page_size. When the size is not aligned to min_page_size, the driver should handle it in its own way, either by rounding the size up to min_page_size with round_up() or by enabling a WARN_ON(). If we don't handle the alignment properly, we may hit the following bug: Unigine Heaven, for example, makes an allocation request where the required pages are 257 and the alignment request is 256. To allocate the left-over 1 page, the iteration continues to find the order value, which is 0, and when that is compared with min_order = 8, it triggers the BUG_ON(order < min_order).
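(Editor's illustration, not part of the original patch: the failing order arithmetic can be reproduced in a small stand-alone program. The 4 KiB chunk size and the helper below are assumptions made for the sketch; only the 257-page request and the 256-page minimum come from the report above.)

  #include <stdio.h>

  /* fls_like() mimics the kernel's fls(): index of the last set bit, 1-based. */
  static unsigned int fls_like(unsigned long long v)
  {
          unsigned int r = 0;

          while (v) {
                  v >>= 1;
                  r++;
          }
          return r;
  }

  int main(void)
  {
          unsigned long long pages = 257; /* size / chunk_size for a 257 * 4 KiB request */
          unsigned int min_order = 8;     /* min_page_size of 256 chunks -> ilog2(256) = 8 */

          while (pages) {
                  unsigned int order = fls_like(pages) - 1; /* 8 on the first pass, then 0 */

                  printf("order %u, min_order %u%s\n", order, min_order,
                         order < min_order ? " <-- BUG_ON(order < min_order)" : "");
                  pages -= 1ULL << order; /* 256 pages first, then the left-over 1 page */
          }
          return 0;
  }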
v2: add more commit description v3: remove WARN_ON() Signed-off-by: Arunpravin Paneer Selvam Suggested-by: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20220411073834.15210-1-Arunpravin.PaneerSelvam@amd.com Signed-off-by: Christian König --- drivers/gpu/drm/drm_buddy.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index 72f52f293249..11bb59399471 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -665,6 +665,9 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, if (start + size == end) return __drm_buddy_alloc_range(mm, start, size, blocks); + if (!IS_ALIGNED(size, min_page_size)) + return -EINVAL; + pages = size >> ilog2(mm->chunk_size); order = fls(pages) - 1; min_order = ilog2(min_page_size) - ilog2(mm->chunk_size); From d72dcbe9fce505228dae43bef9da8f2b707d1b3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 11 Apr 2022 15:21:59 +0200 Subject: [PATCH 051/219] drm/ttm: fix logic inversion in ttm_eu_reserve_buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That should have been max, not min. Signed-off-by: Christian König Fixes: c8d4c18bfbc4 ("dma-buf/drivers: make reserving a shared slot mandatory v4") Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20220411134537.2854-1-christian.koenig@amd.com --- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 0eb995d25df1..dbee34a058df 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -101,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, continue; } - num_fences = min(entry->num_shared, 1u); + num_fences = max(entry->num_shared, 1u); if (!ret) { ret = dma_resv_reserve_fences(bo->base.resv, num_fences); From 6de79dd3a920a138a292231e2da82ab8e019ec99 Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Thu, 7 Apr 2022 13:16:09 +0200 Subject: [PATCH 052/219] drm/bridge: display-connector: add ddc-en gpio support "hdmi-connector.yaml" bindings defines an optional property "ddc-en-gpios" for a single gpio to enable DDC operation. Usually this controls +5V power on the HDMI connector. This +5V may also be needed for HPD. This was not reflected in code but is needed to make the CI20 board work. Now, the driver activates the ddc gpio after probe and deactivates after remove so it is "almost on". But only if this driver is loaded (and not e.g. blacklisted as module). Signed-off-by: H. 
Nikolaus Schaller Reviewed-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Paul Cercueil Link: https://patchwork.freedesktop.org/patch/msgid/3607e924b7c0cf0be956c0d49894be1442dbda41.1649330171.git.hns@goldelico.com --- drivers/gpu/drm/bridge/display-connector.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index d24f5b90feab..e4d52a7e31b7 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -24,6 +24,7 @@ struct display_connector { int hpd_irq; struct regulator *dp_pwr; + struct gpio_desc *ddc_en; }; static inline struct display_connector * @@ -345,6 +346,17 @@ static int display_connector_probe(struct platform_device *pdev) } } + /* enable DDC */ + if (type == DRM_MODE_CONNECTOR_HDMIA) { + conn->ddc_en = devm_gpiod_get_optional(&pdev->dev, "ddc-en", + GPIOD_OUT_HIGH); + + if (IS_ERR(conn->ddc_en)) { + dev_err(&pdev->dev, "Couldn't get ddc-en gpio\n"); + return PTR_ERR(conn->ddc_en); + } + } + conn->bridge.funcs = &display_connector_bridge_funcs; conn->bridge.of_node = pdev->dev.of_node; @@ -373,6 +385,9 @@ static int display_connector_remove(struct platform_device *pdev) { struct display_connector *conn = platform_get_drvdata(pdev); + if (conn->ddc_en) + gpiod_set_value(conn->ddc_en, 0); + if (conn->dp_pwr) regulator_disable(conn->dp_pwr); From 33e799ed905b84a53b9efa71c6f2b62cbbca40dd Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Thu, 7 Apr 2022 13:16:10 +0200 Subject: [PATCH 053/219] drm/ingenic: Implement proper .atomic_get_input_bus_fmts The .atomic_get_input_bus_fmts() callback of our top bridge should return the possible input formats for a given output format. If the requested output format is not supported, then NULL should be returned, otherwise the bus format negotiation will end with a bus format that the encoder does not support. Signed-off-by: H.
Nikolaus Schaller Reviewed-by: Neil Armstrong Signed-off-by: Paul Cercueil Link: https://patchwork.freedesktop.org/patch/msgid/ab25925723cff2f3e773e7137567ef86fff5fdba.1649330171.git.hns@goldelico.com --- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 28 ++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index a4f5a323f490..8eb0ad501a7b 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -833,6 +833,32 @@ static int ingenic_drm_bridge_atomic_check(struct drm_bridge *bridge, } } +static u32 * +ingenic_drm_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + switch (output_fmt) { + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_RGB666_1X18: + case MEDIA_BUS_FMT_RGB565_1X16: + case MEDIA_BUS_FMT_RGB888_3X8: + case MEDIA_BUS_FMT_RGB888_3X8_DELTA: + break; + default: + *num_input_fmts = 0; + return NULL; + } + + return drm_atomic_helper_bridge_propagate_bus_fmt(bridge, bridge_state, + crtc_state, conn_state, + output_fmt, + num_input_fmts); +} + static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) { struct ingenic_drm *priv = drm_device_get_priv(arg); @@ -984,7 +1010,7 @@ static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, + .atomic_get_input_bus_fmts = ingenic_drm_bridge_atomic_get_input_bus_fmts, }; static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = { From 71f56b27550986d0e53988ed18db386c106bab55 Mon Sep 17 00:00:00 2001 From: Paul Boddie Date: Thu, 7 Apr 2022 13:16:11 +0200 Subject: [PATCH 054/219] drm/ingenic: Add dw-hdmi driver specialization for jz4780 A specialisation of the generic Synopsys HDMI driver is employed for JZ4780 HDMI support. This requires a new driver, plus device tree and configuration modifications. Here we add Kconfig DRM_INGENIC_DW_HDMI, Makefile and driver code. Note that there is no hpd-gpio installed on the CI20 board HDMI connector. Hence there is no hpd detection by the connector driver and we have to enable polling in the dw-hdmi core driver. For that we need to set .poll_enabled but that struct component can only be accessed by core code. Hence we use the public setter function drm_kms_helper_hotplug_event() introduced before. Also note that we disable Color Space Conversion since it is not working on jz4780. Signed-off-by: Paul Boddie Signed-off-by: Ezequiel Garcia Signed-off-by: H. 
Nikolaus Schaller Reviewed-by: Neil Armstrong Signed-off-by: Paul Cercueil Link: https://patchwork.freedesktop.org/patch/msgid/e5cdf9cd44bde52cce379cc830f2d6117ea15c32.1649330171.git.hns@goldelico.com --- drivers/gpu/drm/ingenic/Kconfig | 9 ++ drivers/gpu/drm/ingenic/Makefile | 1 + drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c | 103 ++++++++++++++++++++++ 3 files changed, 113 insertions(+) create mode 100644 drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig index 001f59fb06d5..090830bcbde7 100644 --- a/drivers/gpu/drm/ingenic/Kconfig +++ b/drivers/gpu/drm/ingenic/Kconfig @@ -24,4 +24,13 @@ config DRM_INGENIC_IPU The Image Processing Unit (IPU) will appear as a second primary plane. +config DRM_INGENIC_DW_HDMI + tristate "Ingenic specific support for Synopsys DW HDMI" + depends on MACH_JZ4780 + select DRM_DW_HDMI + help + Choose this option to enable Synopsys DesignWare HDMI based driver. + If you want to enable HDMI on Ingenic JZ4780 based SoC, you should + select this option. + endif diff --git a/drivers/gpu/drm/ingenic/Makefile b/drivers/gpu/drm/ingenic/Makefile index d313326bdddb..f10cc1c5a5f2 100644 --- a/drivers/gpu/drm/ingenic/Makefile +++ b/drivers/gpu/drm/ingenic/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_DRM_INGENIC) += ingenic-drm.o ingenic-drm-y = ingenic-drm-drv.o ingenic-drm-$(CONFIG_DRM_INGENIC_IPU) += ingenic-ipu.o +obj-$(CONFIG_DRM_INGENIC_DW_HDMI) += ingenic-dw-hdmi.o diff --git a/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c b/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c new file mode 100644 index 000000000000..72f8b44998a5 --- /dev/null +++ b/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc. + * Copyright (C) 2019, 2020 Paul Boddie + * + * Derived from dw_hdmi-imx.c with i.MX portions removed. + */ + +#include +#include +#include + +#include +#include +#include + +static const struct dw_hdmi_mpll_config ingenic_mpll_cfg[] = { + { 45250000, { { 0x01e0, 0x0000 }, { 0x21e1, 0x0000 }, { 0x41e2, 0x0000 } } }, + { 92500000, { { 0x0140, 0x0005 }, { 0x2141, 0x0005 }, { 0x4142, 0x0005 } } }, + { 148500000, { { 0x00a0, 0x000a }, { 0x20a1, 0x000a }, { 0x40a2, 0x000a } } }, + { 216000000, { { 0x00a0, 0x000a }, { 0x2001, 0x000f }, { 0x4002, 0x000f } } }, + { ~0UL, { { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, { 0x0000, 0x0000 } } } +}; + +static const struct dw_hdmi_curr_ctrl ingenic_cur_ctr[] = { + /*pixelclk bpp8 bpp10 bpp12 */ + { 54000000, { 0x091c, 0x091c, 0x06dc } }, + { 58400000, { 0x091c, 0x06dc, 0x06dc } }, + { 72000000, { 0x06dc, 0x06dc, 0x091c } }, + { 74250000, { 0x06dc, 0x0b5c, 0x091c } }, + { 118800000, { 0x091c, 0x091c, 0x06dc } }, + { 216000000, { 0x06dc, 0x0b5c, 0x091c } }, + { ~0UL, { 0x0000, 0x0000, 0x0000 } }, +}; + +/* + * Resistance term 133Ohm Cfg + * PREEMP config 0.00 + * TX/CK level 10 + */ +static const struct dw_hdmi_phy_config ingenic_phy_config[] = { + /*pixelclk symbol term vlev */ + { 216000000, 0x800d, 0x0005, 0x01ad}, + { ~0UL, 0x0000, 0x0000, 0x0000} +}; + +static enum drm_mode_status +ingenic_dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock < 13500) + return MODE_CLOCK_LOW; + /* FIXME: Hardware is capable of 270MHz, but setup data is missing. 
*/ + if (mode->clock > 216000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static struct dw_hdmi_plat_data ingenic_dw_hdmi_plat_data = { + .mpll_cfg = ingenic_mpll_cfg, + .cur_ctr = ingenic_cur_ctr, + .phy_config = ingenic_phy_config, + .mode_valid = ingenic_dw_hdmi_mode_valid, + .output_port = 1, +}; + +static const struct of_device_id ingenic_dw_hdmi_dt_ids[] = { + { .compatible = "ingenic,jz4780-dw-hdmi" }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, ingenic_dw_hdmi_dt_ids); + +static void ingenic_dw_hdmi_cleanup(void *data) +{ + struct dw_hdmi *hdmi = (struct dw_hdmi *)data; + + dw_hdmi_remove(hdmi); +} + +static int ingenic_dw_hdmi_probe(struct platform_device *pdev) +{ + struct dw_hdmi *hdmi; + + hdmi = dw_hdmi_probe(pdev, &ingenic_dw_hdmi_plat_data); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); + + return devm_add_action_or_reset(&pdev->dev, ingenic_dw_hdmi_cleanup, hdmi); +} + +static struct platform_driver ingenic_dw_hdmi_driver = { + .probe = ingenic_dw_hdmi_probe, + .driver = { + .name = "dw-hdmi-ingenic", + .of_match_table = ingenic_dw_hdmi_dt_ids, + }, +}; +module_platform_driver(ingenic_dw_hdmi_driver); + +MODULE_DESCRIPTION("JZ4780 Specific DW-HDMI Driver Extension"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dw-hdmi-ingenic"); From f5e284bb74ab296f98122673c7ecd22028b2c200 Mon Sep 17 00:00:00 2001 From: Zhou Qingyang Date: Wed, 1 Dec 2021 11:37:03 +0800 Subject: [PATCH 055/219] drm/komeda: Fix an undefined behavior bug in komeda_plane_add() In komeda_plane_add(), komeda_get_layer_fourcc_list() is assigned to formats and used in drm_universal_plane_init(). drm_universal_plane_init() passes formats to __drm_universal_plane_init(). __drm_universal_plane_init() further passes formats to memcpy() as src parameter, which could lead to an undefined behavior bug on failure of komeda_get_layer_fourcc_list(). Fix this bug by adding a check of formats. This bug was found by a static analyzer. The analysis employs differential checking to identify inconsistent security operations (e.g., checks or kfrees) between two code paths and confirms that the inconsistent operations are not recovered in the current function or the callers, so they constitute bugs. Note that, as a bug found by static analysis, it can be a false positive or hard to trigger. Multiple researchers have cross-reviewed the bug. Builds with CONFIG_DRM_KOMEDA=m show no new warnings, and our static analyzer no longer warns about this code. 
Fixes: 61f1c4a8ab75 ("drm/komeda: Attach komeda_dev to DRM-KMS") Signed-off-by: Zhou Qingyang Signed-off-by: Liviu Dudau Link: https://lore.kernel.org/dri-devel/20211201033704.32054-1-zhou1615@umn.edu --- drivers/gpu/drm/arm/display/komeda/komeda_plane.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c index 541949f2d44a..9a8197a23c45 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c @@ -256,6 +256,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms, formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl, layer->layer_type, &n_formats); + if (!formats) { + kfree(kplane); + return -ENOMEM; + } err = drm_universal_plane_init(&kms->base, plane, get_possible_crtcs(kms, c->pipeline), From c8f76c37cc3668ee45e081e76a15f24a352ebbdd Mon Sep 17 00:00:00 2001 From: Liviu Dudau Date: Thu, 2 Dec 2021 17:00:33 +0000 Subject: [PATCH 056/219] drm/komeda: return early if drm_universal_plane_init() fails. If drm_universal_plane_init() fails early we jump to the common cleanup code that calls komeda_plane_destroy() which in turn could access the uninitalised drm_plane and crash. Return early if an error is detected without going through the common code. Reported-by: Steven Price Reviewed-by: Steven Price Signed-off-by: Liviu Dudau Link: https://lore.kernel.org/dri-devel/20211203100946.2706922-1-liviu.dudau@arm.com --- drivers/gpu/drm/arm/display/komeda/komeda_plane.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c index 9a8197a23c45..e0b9f7063b20 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c @@ -270,8 +270,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms, komeda_put_fourcc_list(formats); - if (err) - goto cleanup; + if (err) { + kfree(kplane); + return err; + } drm_plane_helper_add(plane, &komeda_plane_helper_funcs); From b5c7d19736a17115f0c3489a2ebfa5c5181902b9 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 5 Apr 2022 15:11:18 +0100 Subject: [PATCH 057/219] drm/arm/malidp: Stop using iommu_present() iommu_get_domain_for_dev() is already perfectly happy to return NULL if the given device has no IOMMU. Drop the unnecessary check. 
Signed-off-by: Robin Murphy Signed-off-by: Liviu Dudau Link: https://patchwork.freedesktop.org/patch/msgid/5049994e6c2ba92c2f30d51850c8929136d0f8ca.1649167878.git.robin.murphy@arm.com --- drivers/gpu/drm/arm/malidp_planes.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 0562bdaac00c..81d9f5004025 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -310,17 +310,13 @@ static int malidp_se_check_scaling(struct malidp_plane *mp, static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp) { - u32 pgsize_bitmap = 0; + struct iommu_domain *mmu_dom; - if (iommu_present(&platform_bus_type)) { - struct iommu_domain *mmu_dom = - iommu_get_domain_for_dev(mp->base.dev->dev); + mmu_dom = iommu_get_domain_for_dev(mp->base.dev->dev); + if (mmu_dom) + return mmu_dom->pgsize_bitmap; - if (mmu_dom) - pgsize_bitmap = mmu_dom->pgsize_bitmap; - } - - return pgsize_bitmap; + return 0; } /* From 73c3ed7495c67b8fbdc31cf58e6ca8757df31a33 Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Tue, 14 Dec 2021 18:08:37 +0800 Subject: [PATCH 058/219] drm: mali-dp: potential dereference of null pointer The return value of kzalloc() needs to be checked. To avoid use of null pointer '&state->base' in case of the failure of alloc. Fixes: 99665d072183 ("drm: mali-dp: add malidp_crtc_state struct") Signed-off-by: Jiasheng Jiang Reviewed-by: Brian Starkey Signed-off-by: Liviu Dudau Link: https://patchwork.freedesktop.org/patch/msgid/20211214100837.46912-1-jiasheng@iscas.ac.cn --- drivers/gpu/drm/arm/malidp_crtc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 494075ddbef6..b5928b52e279 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -487,7 +487,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc) if (crtc->state) malidp_crtc_destroy_state(crtc, crtc->state); - __drm_atomic_helper_crtc_reset(crtc, &state->base); + if (state) + __drm_atomic_helper_crtc_reset(crtc, &state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); } static int malidp_crtc_enable_vblank(struct drm_crtc *crtc) From 563c4a7599d8ac0f6826b33f312d886eda7938aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 18 Feb 2022 12:04:02 +0200 Subject: [PATCH 059/219] drm: Use drm_mode_init() for on-stack modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initialize on-stack modes with drm_mode_init() to guarantee no stack garbage in the list head, or that we aren't copying over another mode's list head. Based on the following cocci script, with manual fixups: @decl@ identifier M; expression E; @@ - struct drm_display_mode M = E; + struct drm_display_mode M; @@ identifier decl.M; expression decl.E; statement S, S1; @@ struct drm_display_mode M; ... 
when != S + drm_mode_init(&M, &E); + S1 @@ expression decl.E; @@ - &*E + E Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220218100403.7028-22-ville.syrjala@linux.intel.com Reviewed-by: Andrzej Hajda --- drivers/gpu/drm/drm_crtc_helper.c | 8 ++++---- drivers/gpu/drm/drm_edid.c | 8 ++++++-- drivers/gpu/drm/drm_modes.c | 4 +++- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index bff917531f33..a34aa009725f 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -297,8 +297,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, return false; } - saved_mode = crtc->mode; - saved_hwmode = crtc->hwmode; + drm_mode_init(&saved_mode, &crtc->mode); + drm_mode_init(&saved_hwmode, &crtc->hwmode); saved_x = crtc->x; saved_y = crtc->y; @@ -411,8 +411,8 @@ done: drm_mode_destroy(dev, adjusted_mode); if (!ret) { crtc->enabled = saved_enabled; - crtc->mode = saved_mode; - crtc->hwmode = saved_hwmode; + drm_mode_copy(&crtc->mode, &saved_mode); + drm_mode_copy(&crtc->hwmode, &saved_hwmode); crtc->x = saved_x; crtc->y = saved_y; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 04e818ecd662..dbca588474c3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3551,9 +3551,11 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m match_flags |= DRM_MODE_MATCH_ASPECT_RATIO; for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) { - struct drm_display_mode cea_mode = *cea_mode_for_vic(vic); + struct drm_display_mode cea_mode; unsigned int clock1, clock2; + drm_mode_init(&cea_mode, cea_mode_for_vic(vic)); + /* Check both 60Hz and 59.94Hz */ clock1 = cea_mode.clock; clock2 = cea_mode_alternate_clock(&cea_mode); @@ -3590,9 +3592,11 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) match_flags |= DRM_MODE_MATCH_ASPECT_RATIO; for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) { - struct drm_display_mode cea_mode = *cea_mode_for_vic(vic); + struct drm_display_mode cea_mode; unsigned int clock1, clock2; + drm_mode_init(&cea_mode, cea_mode_for_vic(vic)); + /* Check both 60Hz and 59.94Hz */ clock1 = cea_mode.clock; clock2 = cea_mode_alternate_clock(&cea_mode); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 6e7e10f16ec0..e699950cc34d 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -837,7 +837,9 @@ EXPORT_SYMBOL(drm_mode_vrefresh); void drm_mode_get_hv_timing(const struct drm_display_mode *mode, int *hdisplay, int *vdisplay) { - struct drm_display_mode adjusted = *mode; + struct drm_display_mode adjusted; + + drm_mode_init(&adjusted, mode); drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY); *hdisplay = adjusted.crtc_hdisplay; From a3342f4d5af16888e385980a4bea4cc195c36bc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 18 Feb 2022 12:04:03 +0200 Subject: [PATCH 060/219] drm: Use drm_mode_copy() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit struct drm_display_mode embeds a list head, so overwriting the full struct with another one will corrupt the list (if the destination mode is on a list). Use drm_mode_copy() instead which explicitly preserves the list head of the destination mode. Even if we know the destination mode is not on any list using drm_mode_copy() seems decent as it sets a good example. 
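For reference, the helper recommended here boils down to preserving the destination's list head across the struct copy. An editor's paraphrase follows (a sketch of the idea, not a verbatim quote of drivers/gpu/drm/drm_modes.c):

  /* Sketch: copy all mode fields but keep dst's own list linkage intact. */
  static void drm_mode_copy_sketch(struct drm_display_mode *dst,
                                   const struct drm_display_mode *src)
  {
          struct list_head head = dst->head;

          *dst = *src;      /* copies the timings... and src's list_head */
          dst->head = head; /* ...so put dst's original linkage back */
  }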
Bad examples of not using it might eventually get copied into code where preserving the list head actually matters. Obviously one case not covered here is when the mode itself is embedded in a larger structure and the whole structure is copied. But if we are careful when copying into modes embedded in structures I think we can be a little more reassured that bogus list heads haven't been propagated in. @is_mode_copy@ @@ drm_mode_copy(...) { ... } @depends on !is_mode_copy@ struct drm_display_mode *mode; expression E, S; @@ ( - *mode = E + drm_mode_copy(mode, &E) | - memcpy(mode, E, S) + drm_mode_copy(mode, E) ) @depends on !is_mode_copy@ struct drm_display_mode mode; expression E; @@ ( - mode = E + drm_mode_copy(&mode, &E) | - memcpy(&mode, E, S) + drm_mode_copy(&mode, E) ) @@ struct drm_display_mode *mode; @@ - &*mode + mode Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220218100403.7028-23-ville.syrjala@linux.intel.com Reviewed-by: Andrzej Hajda --- drivers/gpu/drm/drm_crtc_helper.c | 4 ++-- drivers/gpu/drm/drm_vblank.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a34aa009725f..b632825654a9 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -305,7 +305,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, /* Update crtc values up front so the driver can rely on them for mode * setting. */ - crtc->mode = *mode; + drm_mode_copy(&crtc->mode, mode); crtc->x = x; crtc->y = y; @@ -341,7 +341,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, } DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); - crtc->hwmode = *adjusted_mode; + drm_mode_copy(&crtc->hwmode, adjusted_mode); /* Prepare the encoders and CRTCs before setting the mode. 
*/ drm_for_each_encoder(encoder, dev) { diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index b701cda86d0c..2ff31717a3de 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -644,7 +644,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc, vblank->linedur_ns = linedur_ns; vblank->framedur_ns = framedur_ns; - vblank->hwmode = *mode; + drm_mode_copy(&vblank->hwmode, mode); drm_dbg_core(dev, "crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n", From c63462184b35316fed7657d3c92dc5099ad5fab2 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 11 Apr 2022 09:56:03 +0100 Subject: [PATCH 061/219] drm/ttm: stop passing NULL fence in ttm_bo_move_sync_cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we hit the sync case, like when skipping clearing for kernel internal objects, or when falling back to cpu clearing, like in i915, we end up trying to add a NULL fence, but with some recent changes in this area this now just results in NULL deref in dma_resv_add_fence: <1>[ 5.466383] BUG: kernel NULL pointer dereference, address: 0000000000000008 <1>[ 5.466384] #PF: supervisor read access in kernel mode <1>[ 5.466385] #PF: error_code(0x0000) - not-present page <6>[ 5.466386] PGD 0 P4D 0 <4>[ 5.466387] Oops: 0000 [#1] PREEMPT SMP NOPTI <4>[ 5.466389] CPU: 5 PID: 267 Comm: modprobe Not tainted 5.18.0-rc2-CI-CI_DRM_11481+ #1 <4>[ 5.466391] RIP: 0010:dma_resv_add_fence+0x63/0x260 <4>[ 5.466395] Code: 38 85 c0 0f 84 df 01 00 00 0f 88 e8 01 00 00 83 c0 01 0f 88 df 01 00 00 8b 05 35 89 10 01 49 8d 5e 68 85 c0 0f 85 45 01 00 00 <48> 8b 45 08 48 3d c0 a5 0a 82 0f 84 5c 01 00 00 48 3d 60 a5 0a 82 <4>[ 5.466396] RSP: 0018:ffffc90000e974f8 EFLAGS: 00010202 <4>[ 5.466397] RAX: 0000000000000001 RBX: ffff888123e88b28 RCX: 00000000ffffffff <4>[ 5.466398] RDX: 0000000000000001 RSI: ffffffff822e4f50 RDI: ffffffff8233f087 <4>[ 5.466399] RBP: 0000000000000000 R08: ffff8881313dbc80 R09: 0000000000000001 <4>[ 5.466399] R10: 0000000000000001 R11: 00000000da354294 R12: 0000000000000000 <4>[ 5.466400] R13: ffff88810927dc58 R14: ffff888123e88ac0 R15: ffff88810a88d600 <4>[ 5.466401] FS: 00007f5fa1193540(0000) GS:ffff88845d880000(0000) knlGS:0000000000000000 <4>[ 5.466402] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4>[ 5.466402] CR2: 0000000000000008 CR3: 0000000106dd6003 CR4: 00000000003706e0 <4>[ 5.466403] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 <4>[ 5.466404] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 <4>[ 5.466404] Call Trace: <4>[ 5.466405] <4>[ 5.466406] ttm_bo_move_accel_cleanup+0x62/0x270 [ttm] <4>[ 5.466411] ? i915_rsgt_from_buddy_resource+0x185/0x1e0 [i915] <4>[ 5.466529] i915_ttm_move+0xfd/0x430 [i915] <4>[ 5.466833] ? dma_resv_reserve_fences+0x4e/0x320 <4>[ 5.466836] ? ttm_bo_add_move_fence.constprop.20+0xf7/0x140 [ttm] <4>[ 5.466841] ttm_bo_handle_move_mem+0xa1/0x140 [ttm] <4>[ 5.466845] ttm_bo_validate+0xee/0x160 [ttm] <4>[ 5.466849] __i915_ttm_get_pages+0x4f/0x210 [i915] <4>[ 5.466976] i915_ttm_get_pages+0xad/0x140 [i915] <4>[ 5.467094] ____i915_gem_object_get_pages+0x32/0xf0 [i915] <4>[ 5.467210] __i915_gem_object_get_pages+0x89/0xa0 [i915] <4>[ 5.467323] i915_vma_get_pages+0x114/0x1d0 [i915] <4>[ 5.467446] i915_vma_pin_ww+0xd3/0xa90 [i915] <4>[ 5.467570] i915_vma_pin.constprop.10+0x119/0x1b0 [i915] <4>[ 5.467700] ? 
__mutex_unlock_slowpath+0x3e/0x2b0 <4>[ 5.467704] intel_alloc_initial_plane_obj.isra.6+0x1a9/0x390 [i915] <4>[ 5.467833] intel_crtc_initial_plane_config+0x83/0x340 [i915] In the ttm_bo_move_sync_cleanup() case it seems we only really care about calling ttm_bo_wait_free_node(), so let's instead just call that directly. Signed-off-by: Matthew Auld Cc: Thomas Hellström Cc: Christian König Cc: Lucas De Marchi Cc: Nirmoy Das Reviewed-by: Christian König Signed-off-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20220411085603.58156-1-matthew.auld@intel.com --- drivers/gpu/drm/ttm/ttm_bo_util.c | 15 +++++++++++++++ include/drm/ttm/ttm_bo_driver.h | 11 +++-------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bc5190340b9c..1cbfb00c1d65 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -572,6 +572,21 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); +void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + struct ttm_device *bdev = bo->bdev; + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + int ret; + + ret = ttm_bo_wait_free_node(bo, man->use_tt); + if (WARN_ON(ret)) + return; + + ttm_bo_assign_mem(bo, new_mem); +} +EXPORT_SYMBOL(ttm_bo_move_sync_cleanup); + /** * ttm_bo_pipeline_gutting - purge the contents of a bo * @bo: The buffer object diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 059a595e14e5..897b88f0bd59 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -245,7 +245,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_resource *new_mem); /** - * ttm_bo_move_accel_cleanup. + * ttm_bo_move_sync_cleanup. * * @bo: A pointer to a struct ttm_buffer_object. * @new_mem: struct ttm_resource indicating where to move. @@ -253,13 +253,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed * by the caller to be idle. Typically used after memcpy buffer moves. */ -static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem); - - WARN_ON(ret); -} +void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem); /** * ttm_bo_pipeline_gutting. From 62c6f4f9bb08a8b18fc6e12d865284991ec3776a Mon Sep 17 00:00:00 2001 From: Changcheng Deng Date: Wed, 9 Feb 2022 08:48:10 +0000 Subject: [PATCH 062/219] fbcon: use min() to make code cleaner Use min() in order to make code cleaner. Reported-by: Zeal Robot Signed-off-by: Changcheng Deng Reviewed-by: Sam Ravnborg Signed-off-by: Helge Deller Link: https://patchwork.freedesktop.org/patch/msgid/20220209084810.1561184-1-deng.changcheng@zte.com.cn --- drivers/video/fbdev/core/fbcon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 6a7d470beec7..c56dd2f73f79 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -578,7 +578,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, save = kmalloc(array3_size(logo_lines, new_cols, 2), GFP_KERNEL); if (save) { - int i = cols < new_cols ? 
cols : new_cols; + int i = min(cols, new_cols); scr_memsetw(save, erase, array3_size(logo_lines, new_cols, 2)); r = q - step; for (cnt = 0; cnt < logo_lines; cnt++, r += i) From 7be2bb8c2ff3f769c49f5a527e7ea992b81e9ca1 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Wed, 13 Apr 2022 09:21:33 +0100 Subject: [PATCH 063/219] drm/ttm: fixup ttm_bo_add_move_fence v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It looks like we still need to call dma_fence_put() on the man->move, otherwise we just end up leaking it, leading to fireworks later. v2(Daniel): - Simplify the function tail Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5689 Fixes: 8bb31587820a ("drm/ttm: remove bo->moving") Cc: Christian König Cc: Daniel Vetter Signed-off-by: Matthew Auld Reviewed-by: Daniel Vetter Reviewed-by: Christian König Signed-off-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20220413082133.272445-1-matthew.auld@intel.com --- drivers/gpu/drm/ttm/ttm_bo.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 015a94f766de..75d308ec173d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -740,11 +740,8 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); ret = dma_resv_reserve_fences(bo->base.resv, 1); - if (unlikely(ret)) { - dma_fence_put(fence); - return ret; - } - return 0; + dma_fence_put(fence); + return ret; } /* From 84ab41db1734c7764a4f29a99d3106586495638c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 13 Apr 2022 11:11:36 +0200 Subject: [PATCH 064/219] drm/ttm: fix kerneldoc for ttm_lru_bulk_move MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update the kerneldoc for the members as well. Signed-off-by: Christian König Fixes: b0e2c9ea5afc ("drm/ttm: allow bulk moves for all domains") Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220413091242.638413-1-christian.koenig@amd.com --- include/drm/ttm/ttm_resource.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 314f73de4280..f82fee18c07f 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -215,8 +215,7 @@ struct ttm_lru_bulk_move_pos { /** * struct ttm_lru_bulk_move * - * @tt: first/last lru entry for resources in the TT domain - * @vram: first/last lru entry for resources in the VRAM domain + * @pos: first/last lru entry for resources in the each domain/priority * * Container for the current bulk move state. Should be used with * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move(). From fb47723aba6fb62a6dce34eb108c886d9d469f9c Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sun, 3 Apr 2022 19:10:04 +0200 Subject: [PATCH 065/219] drm: bridge: icn6211: Convert to regmap To make debugging easier, convert driver to regmap. Implement read and write regmap tables for known registers, keep all known register readable and mark those which are obviously read-only as not writeable. Use common I2C regmap for the I2C configuration, implement custom regmap bus for DSI configuration. The later is mandatory as this chip requires one extra byte set to read access length between register address and data. 
Signed-off-by: Marek Vasut Cc: Jagan Teki Cc: Mark Brown Cc: Maxime Ripard Cc: Robert Foss Cc: Sam Ravnborg Cc: Thomas Zimmermann To: dri-devel@lists.freedesktop.org Acked-by: Mark Brown Link: https://patchwork.freedesktop.org/patch/msgid/20220403171004.368464-1-marex@denx.de --- drivers/gpu/drm/bridge/Kconfig | 1 + drivers/gpu/drm/bridge/chipone-icn6211.c | 96 ++++++++++++++++++++++-- 2 files changed, 89 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 658837caaf39..7ac920a57d26 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -32,6 +32,7 @@ config DRM_CHIPONE_ICN6211 select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL_BRIDGE + select REGMAP_I2C help ICN6211 is MIPI-DSI/RGB Converter bridge from chipone. diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index b72aa3f3b5c2..6ce0633d4c08 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #define VENDOR_ID 0x00 @@ -134,6 +135,7 @@ struct chipone { struct device *dev; + struct regmap *regmap; struct i2c_client *client; struct drm_bridge bridge; struct drm_display_mode mode; @@ -146,6 +148,77 @@ struct chipone { bool interface_i2c; }; +static const struct regmap_range chipone_dsi_readable_ranges[] = { + regmap_reg_range(VENDOR_ID, VERSION_ID), + regmap_reg_range(FIRMWARE_VERSION, PLL_SSC_OFFSET(3)), + regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL), + regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE), + regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H), + regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE), + regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)), + regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM), + regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)), + regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)), +}; + +static const struct regmap_access_table chipone_dsi_readable_table = { + .yes_ranges = chipone_dsi_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(chipone_dsi_readable_ranges), +}; + +static const struct regmap_range chipone_dsi_writeable_ranges[] = { + regmap_reg_range(CONFIG_FINISH, PLL_SSC_OFFSET(3)), + regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL), + regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE), + regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H), + regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE), + regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)), + regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM), + regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)), + regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)), +}; + +static const struct regmap_access_table chipone_dsi_writeable_table = { + .yes_ranges = chipone_dsi_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(chipone_dsi_writeable_ranges), +}; + +static const struct regmap_config chipone_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .rd_table = &chipone_dsi_readable_table, + .wr_table = &chipone_dsi_writeable_table, + .cache_type = REGCACHE_RBTREE, + .max_register = MIPI_ATE_STATUS_(1), +}; + +static int chipone_dsi_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct mipi_dsi_device *dsi = context; + const u16 reg16 = (val_size << 8) | *(u8 *)reg; + int ret; + + ret = mipi_dsi_generic_read(dsi, ®16, 2, val, val_size); + + return ret == val_size ? 
0 : -EINVAL; +} + +static int chipone_dsi_write(void *context, const void *data, size_t count) +{ + struct mipi_dsi_device *dsi = context; + + return mipi_dsi_generic_write(dsi, data, 2); +} + +static const struct regmap_bus chipone_dsi_regmap_bus = { + .read = chipone_dsi_read, + .write = chipone_dsi_write, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge) { return container_of(bridge, struct chipone, bridge); @@ -153,18 +226,16 @@ static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge) static void chipone_readb(struct chipone *icn, u8 reg, u8 *val) { - if (icn->interface_i2c) - *val = i2c_smbus_read_byte_data(icn->client, reg); - else - mipi_dsi_generic_read(icn->dsi, (u8[]){reg, 1}, 2, val, 1); + int ret, pval; + + ret = regmap_read(icn->regmap, reg, &pval); + + *val = ret ? 0 : pval & 0xff; } static int chipone_writeb(struct chipone *icn, u8 reg, u8 val) { - if (icn->interface_i2c) - return i2c_smbus_write_byte_data(icn->client, reg, val); - else - return mipi_dsi_generic_write(icn->dsi, (u8[]){reg, val}, 2); + return regmap_write(icn->regmap, reg, val); } static void chipone_configure_pll(struct chipone *icn, @@ -591,6 +662,11 @@ static int chipone_dsi_probe(struct mipi_dsi_device *dsi) if (ret) return ret; + icn->regmap = devm_regmap_init(dev, &chipone_dsi_regmap_bus, + dsi, &chipone_regmap_config); + if (IS_ERR(icn->regmap)) + return PTR_ERR(icn->regmap); + icn->interface_i2c = false; icn->dsi = dsi; @@ -616,6 +692,10 @@ static int chipone_i2c_probe(struct i2c_client *client, if (ret) return ret; + icn->regmap = devm_regmap_init_i2c(client, &chipone_regmap_config); + if (IS_ERR(icn->regmap)) + return PTR_ERR(icn->regmap); + icn->interface_i2c = true; icn->client = client; dev_set_drvdata(dev, icn); From 03fa454bb666f243d2ffe8ee7395b89cf4e3c27d Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Fri, 1 Apr 2022 18:21:53 +0200 Subject: [PATCH 066/219] drm/panel: lvds: Simplify mode parsing The mode parsing is currently implemented in three steps: of_get_display_timing() - DT panel-timing to struct display_timing videomode_from_timing() - struct display_timing to struct videomode drm_display_mode_from_videomode() - struct videomode to struct drm_display_mode Replace all that with simple of_get_drm_panel_display_mode() call, which already populates struct drm_display_mode and then duplicate that mode in panel_lvds_get_modes() each time, since the mode does not change. Nice bonus is the bus_flags parsed by of_get_drm_panel_display_mode() out of panel-timing DT node, which is used in subsequent patch to fix handling of 'de-active' DT property. 
Tested-by: Christoph Niedermaier Signed-off-by: Marek Vasut Cc: Christoph Niedermaier Cc: Daniel Vetter Cc: Dmitry Osipenko Cc: Laurent Pinchart Cc: Robert Foss Cc: Sam Ravnborg Cc: Thomas Zimmermann To: dri-devel@lists.freedesktop.org Reviewed-by: Laurent Pinchart Link: https://patchwork.freedesktop.org/patch/msgid/20220401162154.295152-1-marex@denx.de --- drivers/gpu/drm/panel/panel-lvds.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 27a1c9923b09..6422868c1089 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -30,7 +30,8 @@ struct panel_lvds { const char *label; unsigned int width; unsigned int height; - struct videomode video_mode; + struct drm_display_mode dmode; + u32 bus_flags; unsigned int bus_format; bool data_mirror; @@ -87,16 +88,15 @@ static int panel_lvds_get_modes(struct drm_panel *panel, struct panel_lvds *lvds = to_panel_lvds(panel); struct drm_display_mode *mode; - mode = drm_mode_create(connector->dev); + mode = drm_mode_duplicate(connector->dev, &lvds->dmode); if (!mode) return 0; - drm_display_mode_from_videomode(&lvds->video_mode, mode); mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); - connector->display_info.width_mm = lvds->width; - connector->display_info.height_mm = lvds->height; + connector->display_info.width_mm = lvds->dmode.width_mm; + connector->display_info.height_mm = lvds->dmode.height_mm; drm_display_info_set_bus_formats(&connector->display_info, &lvds->bus_format, 1); connector->display_info.bus_flags = lvds->data_mirror @@ -116,7 +116,6 @@ static const struct drm_panel_funcs panel_lvds_funcs = { static int panel_lvds_parse_dt(struct panel_lvds *lvds) { struct device_node *np = lvds->dev->of_node; - struct display_timing timing; int ret; ret = of_drm_get_panel_orientation(np, &lvds->orientation); @@ -125,23 +124,20 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) return ret; } - ret = of_get_display_timing(np, "panel-timing", &timing); + ret = of_get_drm_panel_display_mode(np, &lvds->dmode, &lvds->bus_flags); if (ret < 0) { dev_err(lvds->dev, "%pOF: problems parsing panel-timing (%d)\n", np, ret); return ret; } - videomode_from_timing(&timing, &lvds->video_mode); - - ret = of_property_read_u32(np, "width-mm", &lvds->width); - if (ret < 0) { + if (lvds->dmode.width_mm == 0) { dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n", np, "width-mm"); return -ENODEV; } - ret = of_property_read_u32(np, "height-mm", &lvds->height); - if (ret < 0) { + + if (lvds->dmode.height_mm == 0) { dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n", np, "height-mm"); return -ENODEV; From 83c784e7003625d63ff4609500c9f11736edebed Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Fri, 1 Apr 2022 18:21:54 +0200 Subject: [PATCH 067/219] drm/panel: lvds: Use bus_flags from DT panel-timing property This driver currently rewrites bus_flags based solely on the value of the DT property 'data-mirror' and ignores bus_flags which might have been set in the DT panel-timing node. Specifically, the 'de-active' DT property sets DRM_BUS_FLAG_DE_ bus_flags. Since of_get_drm_panel_display_mode() conveniently parses the bus_flags out of the DT panel-timing property, just OR them with the bus_flags inferred from the 'data-mirror' DT property and use the result as the panel bus_flags. This fixes handling of panels with 'panel-timing { de-active = <1>; };'.
Reviewed-by: Laurent Pinchart Tested-by: Christoph Niedermaier Signed-off-by: Marek Vasut Cc: Christoph Niedermaier Cc: Daniel Vetter Cc: Dmitry Osipenko Cc: Laurent Pinchart Cc: Robert Foss Cc: Sam Ravnborg Cc: Thomas Zimmermann To: dri-devel@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20220401162154.295152-2-marex@denx.de --- drivers/gpu/drm/panel/panel-lvds.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 6422868c1089..eca067e78579 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -33,7 +33,6 @@ struct panel_lvds { struct drm_display_mode dmode; u32 bus_flags; unsigned int bus_format; - bool data_mirror; struct regulator *supply; @@ -99,9 +98,7 @@ static int panel_lvds_get_modes(struct drm_panel *panel, connector->display_info.height_mm = lvds->dmode.height_mm; drm_display_info_set_bus_formats(&connector->display_info, &lvds->bus_format, 1); - connector->display_info.bus_flags = lvds->data_mirror - ? DRM_BUS_FLAG_DATA_LSB_TO_MSB - : DRM_BUS_FLAG_DATA_MSB_TO_LSB; + connector->display_info.bus_flags = lvds->bus_flags; drm_connector_set_panel_orientation(connector, lvds->orientation); return 1; @@ -154,7 +151,9 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) lvds->bus_format = ret; - lvds->data_mirror = of_property_read_bool(np, "data-mirror"); + lvds->bus_flags |= of_property_read_bool(np, "data-mirror") ? + DRM_BUS_FLAG_DATA_LSB_TO_MSB : + DRM_BUS_FLAG_DATA_MSB_TO_LSB; return 0; } From 9d79799193b728b62c9899d931b5009da1f89b67 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 13 Apr 2022 10:21:28 +0200 Subject: [PATCH 068/219] fbcon: Fix delayed takeover locking I messed up the delayed takeover path in the locking conversion in 6e7da3af008b ("fbcon: Move console_lock for register/unlink/unregister"). If CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is enabled, fbcon take-over doesn't take place when calling fbcon_fb_registered(). Instead, it is deferred using a workqueue, whose fbcon_register_existing_fbs() function calls fbcon_fb_registered() again for each registered fbcon fb. This leads to console_lock being taken twice, causing a deadlock. Fix it by re-extracting the lockless function and using it in the delayed takeover path, where we need to hold the lock already to iterate over the list of already registered fbs. Well, the current code is still broken there (since the list is protected by a registration_lock, which we can't take here because it nests the other way round with console_lock), but in the future this will be a list protected by console_lock when this is all sorted out. While reviewing the broken commit, I realized that I had left some outdated comments about the locking behind. Fix those too.
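(Editor's sketch, reconstructed from the description above rather than taken from the patch: before this fix the deferred-takeover worker ended up in roughly the following self-deadlock, since console_lock() is not recursive.)

  static void fbcon_register_existing_fbs_sketch(struct work_struct *work)
  {
          int i;

          console_lock();         /* the worker takes console_lock */
          fbcon_for_each_registered_fb(i)
                  /* fbcon_fb_registered() calls console_lock() again -> deadlock */
                  fbcon_fb_registered(fbcon_registered_fb[i]);
          console_unlock();
  }

The fix below therefore splits out do_fb_registered(), which only asserts that the lock is held, and has the worker call that instead.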
v2: Improve commit message (Javier) Reported-by: Nathan Chancellor Tested-by: Nathan Chancellor Reviewed-by: Javier Martinez Canillas Cc: Nathan Chancellor Fixes: 6e7da3af008b ("fbcon: Move console_lock for register/unlink/unregister") Cc: Sam Ravnborg Cc: Thomas Zimmermann Cc: Du Cheng Cc: Claudio Suarez Cc: Greg Kroah-Hartman Cc: Tetsuo Handa Cc: Matthew Wilcox Cc: Zheyu Ma Cc: Guenter Roeck Cc: Helge Deller Cc: Geert Uytterhoeven Cc: Javier Martinez Canillas Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220413082128.348186-1-daniel.vetter@ffwll.ch --- drivers/video/fbdev/core/fbcon.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index c56dd2f73f79..34744a16d41b 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2772,7 +2772,6 @@ static void fbcon_unbind(void) static inline void fbcon_unbind(void) {} #endif /* CONFIG_VT_HW_CONSOLE_BINDING */ -/* called with console_lock held */ void fbcon_fb_unbind(struct fb_info *info) { int i, new_idx = -1; @@ -2822,7 +2821,6 @@ void fbcon_fb_unbind(struct fb_info *info) console_unlock(); } -/* called with console_lock held */ void fbcon_fb_unregistered(struct fb_info *info) { int i, idx; @@ -2928,14 +2926,11 @@ MODULE_PARM_DESC(lockless_register_fb, "Lockless framebuffer registration for debugging [default=off]"); /* called with console_lock held */ -int fbcon_fb_registered(struct fb_info *info) +static int do_fb_registered(struct fb_info *info) { int ret = 0, i, idx; - if (!lockless_register_fb) - console_lock(); - else - atomic_inc(&ignore_console_lock_warning); + WARN_CONSOLE_UNLOCKED(); fbcon_registered_fb[info->node] = info; fbcon_num_registered_fb++; @@ -2945,7 +2940,7 @@ int fbcon_fb_registered(struct fb_info *info) if (deferred_takeover) { pr_info("fbcon: Deferring console take-over\n"); - goto out; + return 0; } if (info_idx == -1) { @@ -2965,7 +2960,20 @@ int fbcon_fb_registered(struct fb_info *info) } } -out: + return ret; +} + +int fbcon_fb_registered(struct fb_info *info) +{ + int ret; + + if (!lockless_register_fb) + console_lock(); + else + atomic_inc(&ignore_console_lock_warning); + + ret = do_fb_registered(info); + if (!lockless_register_fb) console_unlock(); else @@ -3280,7 +3288,7 @@ static void fbcon_register_existing_fbs(struct work_struct *work) logo_shown = FBCON_LOGO_DONTSHOW; fbcon_for_each_registered_fb(i) - fbcon_fb_registered(fbcon_registered_fb[i]); + do_fb_registered(fbcon_registered_fb[i]); console_unlock(); } From 8baccb27db403afa865b7f9a1452371a23d9bf45 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:24 +0300 Subject: [PATCH 069/219] drm/edid: convert edid_is_zero() to edid_block_is_zero() for blocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As edid_is_zero() is only ever used on EDID blocks, convert it to edid_block_is_zero() with implicit block size. 
Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/5cc9387e22b4a61243df4053d1ebcc14b0007dc8.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dbca588474c3..a8eedc38a39e 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1632,9 +1632,9 @@ static int edid_block_tag(const void *_block) return block[0]; } -static bool edid_is_zero(const void *edid, int length) +static bool edid_block_is_zero(const void *edid) { - return !memchr_inv(edid, 0, length); + return !memchr_inv(edid, 0, EDID_LENGTH); } /** @@ -1785,7 +1785,7 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid, } if (!valid && print_bad_edid) { - if (edid_is_zero(block, EDID_LENGTH)) { + if (edid_block_is_zero(block)) { pr_notice("EDID block is all zeroes\n"); } else { pr_notice("Raw EDID:\n"); @@ -1942,7 +1942,7 @@ static void connector_bad_edid(struct drm_connector *connector, u8 *block = edid + i * EDID_LENGTH; char prefix[20]; - if (edid_is_zero(block, EDID_LENGTH)) + if (edid_block_is_zero(block)) sprintf(prefix, "\t[%02x] ZERO ", i); else if (!drm_edid_block_valid(block, i, false, NULL)) sprintf(prefix, "\t[%02x] BAD ", i); @@ -2019,7 +2019,7 @@ static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector, goto out; if (drm_edid_block_valid(edid, 0, false, edid_corrupt)) break; - if (try == 0 && edid_is_zero(edid, EDID_LENGTH)) { + if (try == 0 && edid_block_is_zero(edid)) { if (null_edid_counter) (*null_edid_counter)++; goto carp; From 49dc0558f764e02ba8c13ee56c1012a6883a981b Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:25 +0300 Subject: [PATCH 070/219] drm/edid: have edid_block_check() detect blocks that are all zero MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have the check function, have it also detect blocks that are all zero instead of leaving that to callers. 
Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/f9ad302e6b7dbcd1dff98d94ec5500ce27bebe10.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a8eedc38a39e..5ee4df2afbc3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1671,6 +1671,7 @@ EXPORT_SYMBOL(drm_edid_are_equal); enum edid_block_status { EDID_BLOCK_OK = 0, EDID_BLOCK_NULL, + EDID_BLOCK_ZERO, EDID_BLOCK_HEADER_CORRUPT, EDID_BLOCK_HEADER_REPAIR, EDID_BLOCK_HEADER_FIXED, @@ -1689,15 +1690,23 @@ static enum edid_block_status edid_block_check(const void *_block, if (is_base_block) { int score = drm_edid_header_is_valid(block); - if (score < clamp(edid_fixup, 0, 8)) - return EDID_BLOCK_HEADER_CORRUPT; + if (score < clamp(edid_fixup, 0, 8)) { + if (edid_block_is_zero(block)) + return EDID_BLOCK_ZERO; + else + return EDID_BLOCK_HEADER_CORRUPT; + } if (score < 8) return EDID_BLOCK_HEADER_REPAIR; } - if (edid_block_compute_checksum(block) != edid_block_get_checksum(block)) - return EDID_BLOCK_CHECKSUM; + if (edid_block_compute_checksum(block) != edid_block_get_checksum(block)) { + if (edid_block_is_zero(block)) + return EDID_BLOCK_ZERO; + else + return EDID_BLOCK_CHECKSUM; + } if (is_base_block) { if (block->version != 1) @@ -1785,7 +1794,7 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid, } if (!valid && print_bad_edid) { - if (edid_block_is_zero(block)) { + if (status == EDID_BLOCK_ZERO) { pr_notice("EDID block is all zeroes\n"); } else { pr_notice("Raw EDID:\n"); From cee2ce1ac25d0aedaa06e81a7bc7170aafb1ea14 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:26 +0300 Subject: [PATCH 071/219] drm/edid: refactor EDID block status printing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split out a function to log EDID block status. The printouts get changed slightly. Unfortunately, not all users will have struct drm_device available, so we convert to pr_* debug logging instead of drm device based logging. 
v2: Complain more loudly about unknown status codes (Ville) Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/98d12db95e55e2e18548822078ec3b16ae006732.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 75 ++++++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5ee4df2afbc3..4c1ceaf33ab6 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1729,6 +1729,50 @@ static bool edid_block_valid(const void *block, bool base) edid_block_tag(block)); } +static void edid_block_status_print(enum edid_block_status status, + const struct edid *block, + int block_num) +{ + switch (status) { + case EDID_BLOCK_OK: + break; + case EDID_BLOCK_NULL: + pr_debug("EDID block %d pointer is NULL\n", block_num); + break; + case EDID_BLOCK_ZERO: + pr_notice("EDID block %d is all zeroes\n", block_num); + break; + case EDID_BLOCK_HEADER_CORRUPT: + pr_notice("EDID has corrupt header\n"); + break; + case EDID_BLOCK_HEADER_REPAIR: + pr_debug("EDID corrupt header needs repair\n"); + break; + case EDID_BLOCK_HEADER_FIXED: + pr_debug("EDID corrupt header fixed\n"); + break; + case EDID_BLOCK_CHECKSUM: + if (edid_block_status_valid(status, edid_block_tag(block))) { + pr_debug("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d, ignoring\n", + block_num, edid_block_tag(block), + edid_block_compute_checksum(block)); + } else { + pr_notice("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d\n", + block_num, edid_block_tag(block), + edid_block_compute_checksum(block)); + } + break; + case EDID_BLOCK_VERSION: + pr_notice("EDID has major version %d, instead of 1\n", + block->version); + break; + default: + WARN(1, "EDID block %d unknown edid block status code %d\n", + block_num, status); + break; + } +} + /** * drm_edid_block_valid - Sanity check the EDID block (base or extension) * @raw_edid: pointer to raw EDID block @@ -1775,33 +1819,16 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid, *edid_corrupt = true; } + edid_block_status_print(status, block, block_num); + /* Determine whether we can use this block with this status. */ valid = edid_block_status_valid(status, edid_block_tag(block)); - /* Some fairly random status printouts. 
*/ - if (status == EDID_BLOCK_CHECKSUM) { - if (valid) { - DRM_DEBUG("EDID block checksum is invalid, remainder is %d\n", - edid_block_compute_checksum(block)); - DRM_DEBUG("Assuming a KVM switch modified the block but left the original checksum\n"); - } else if (print_bad_edid) { - DRM_NOTE("EDID block checksum is invalid, remainder is %d\n", - edid_block_compute_checksum(block)); - } - } else if (status == EDID_BLOCK_VERSION) { - DRM_NOTE("EDID has major version %d, instead of 1\n", - block->version); - } - - if (!valid && print_bad_edid) { - if (status == EDID_BLOCK_ZERO) { - pr_notice("EDID block is all zeroes\n"); - } else { - pr_notice("Raw EDID:\n"); - print_hex_dump(KERN_NOTICE, - " \t", DUMP_PREFIX_NONE, 16, 1, - block, EDID_LENGTH, false); - } + if (!valid && print_bad_edid && status != EDID_BLOCK_ZERO) { + pr_notice("Raw EDID:\n"); + print_hex_dump(KERN_NOTICE, + " \t", DUMP_PREFIX_NONE, 16, 1, + block, EDID_LENGTH, false); } return valid; From 9c7345de9b66f66007bec39b60b04b68d7723e20 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:27 +0300 Subject: [PATCH 072/219] drm/edid: add a helper to log dump an EDID block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unify debug log dumping. There's duplication in the error paths for EDID block validity checks, but this should be neglible. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/b56f120a26f54b0defc43faa6d49e26f072d4d8f.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4c1ceaf33ab6..3db658b6368a 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1773,6 +1773,23 @@ static void edid_block_status_print(enum edid_block_status status, } } +static void edid_block_dump(const char *level, const void *block, int block_num) +{ + enum edid_block_status status; + char prefix[20]; + + status = edid_block_check(block, block_num == 0); + if (status == EDID_BLOCK_ZERO) + sprintf(prefix, "\t[%02x] ZERO ", block_num); + else if (!edid_block_status_valid(status, edid_block_tag(block))) + sprintf(prefix, "\t[%02x] BAD ", block_num); + else + sprintf(prefix, "\t[%02x] GOOD ", block_num); + + print_hex_dump(level, prefix, DUMP_PREFIX_NONE, 16, 1, + block, EDID_LENGTH, false); +} + /** * drm_edid_block_valid - Sanity check the EDID block (base or extension) * @raw_edid: pointer to raw EDID block @@ -1826,9 +1843,7 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid, if (!valid && print_bad_edid && status != EDID_BLOCK_ZERO) { pr_notice("Raw EDID:\n"); - print_hex_dump(KERN_NOTICE, - " \t", DUMP_PREFIX_NONE, 16, 1, - block, EDID_LENGTH, false); + edid_block_dump(KERN_NOTICE, block, block_num); } return valid; @@ -1976,18 +1991,8 @@ static void connector_bad_edid(struct drm_connector *connector, drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name); for (i = 0; i < num_blocks; i++) { u8 *block = edid + i * EDID_LENGTH; - char prefix[20]; - if (edid_block_is_zero(block)) - sprintf(prefix, "\t[%02x] ZERO ", i); - else if (!drm_edid_block_valid(block, i, false, NULL)) - sprintf(prefix, "\t[%02x] BAD ", i); - else - sprintf(prefix, "\t[%02x] GOOD ", i); - - print_hex_dump(KERN_DEBUG, - prefix, DUMP_PREFIX_NONE, 16, 1, - block, EDID_LENGTH, false); + edid_block_dump(KERN_DEBUG, block, i); } } 
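For reference, a hypothetical caller inside drm_edid.c (not part of this patch) only needs to hand the helper a log level, the block pointer and its index; something along these lines:

#include <linux/printk.h>
#include <linux/types.h>

/* 128, as in the EDID spec; repeated here only to keep the sketch standalone. */
#define SKETCH_EDID_LENGTH 128

/*
 * Hypothetical helper: dump the base block plus however many extension
 * blocks were actually read, one edid_block_dump() call per block.
 */
static void sketch_dump_blocks(const void *buf, int nr_blocks)
{
	int i;

	for (i = 0; i < nr_blocks; i++)
		edid_block_dump(KERN_DEBUG,
				(const u8 *)buf + i * SKETCH_EDID_LENGTH, i);
}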
From 63cae081538de1be12a335b267b462c533785ca9 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:28 +0300 Subject: [PATCH 073/219] drm/edid: pass struct edid to connector_bad_edid() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid casting here and there, and make it const. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/4f9fdc961dfd9b36f4649e8ba57d05e43375fc92.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3db658b6368a..64a19a95b693 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1967,7 +1967,7 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len) } static void connector_bad_edid(struct drm_connector *connector, - u8 *edid, int num_blocks) + const struct edid *edid, int num_blocks) { int i; u8 last_block; @@ -1978,22 +1978,19 @@ static void connector_bad_edid(struct drm_connector *connector, * of 0x7e in the EDID of the _index_ of the last block in the * combined chunk of memory. */ - last_block = edid[0x7e]; + last_block = edid->extensions; /* Calculate real checksum for the last edid extension block data */ if (last_block < num_blocks) connector->real_edid_checksum = - edid_block_compute_checksum(edid + last_block * EDID_LENGTH); + edid_block_compute_checksum(edid + last_block); if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS)) return; drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name); - for (i = 0; i < num_blocks; i++) { - u8 *block = edid + i * EDID_LENGTH; - - edid_block_dump(KERN_DEBUG, block, i); - } + for (i = 0; i < num_blocks; i++) + edid_block_dump(KERN_DEBUG, edid + i, i); } /* Get override or firmware EDID */ @@ -2139,7 +2136,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, } if (invalid_blocks) { - connector_bad_edid(connector, (u8 *)edid, edid->extensions + 1); + connector_bad_edid(connector, edid, edid->extensions + 1); edid = edid_filter_invalid_blocks(edid, invalid_blocks); } From 89fb7536ad2fdcdaf95eee44b9e162d0522f48dc Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:29 +0300 Subject: [PATCH 074/219] drm/edid: add typedef for block read function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the callback a bit easier on the eye. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/10b8721bb7ea8c7df1fd0c1d97c5d446905abbf4.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 64a19a95b693..1394750afc9d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2037,10 +2037,11 @@ int drm_add_override_edid_modes(struct drm_connector *connector) } EXPORT_SYMBOL(drm_add_override_edid_modes); +typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len); + static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector, - int (*get_edid_block)(void *data, u8 *buf, unsigned int block, - size_t len), - void *data) + read_block_fn read_block, + void *context) { int *null_edid_counter = connector ? 
&connector->null_edid_counter : NULL; bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL; @@ -2053,7 +2054,7 @@ static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector, /* base block fetch */ for (try = 0; try < 4; try++) { - if (get_edid_block(data, edid, 0, EDID_LENGTH)) + if (read_block(context, edid, 0, EDID_LENGTH)) goto out; if (drm_edid_block_valid(edid, 0, false, edid_corrupt)) break; @@ -2097,9 +2098,8 @@ out: * Return: Pointer to valid EDID or NULL if we couldn't find any. */ struct edid *drm_do_get_edid(struct drm_connector *connector, - int (*get_edid_block)(void *data, u8 *buf, unsigned int block, - size_t len), - void *data) + read_block_fn read_block, + void *context) { int j, invalid_blocks = 0; struct edid *edid, *new, *override; @@ -2108,7 +2108,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, if (override) return override; - edid = drm_do_get_edid_base_block(connector, get_edid_block, data); + edid = drm_do_get_edid_base_block(connector, read_block, context); if (!edid) return NULL; @@ -2125,7 +2125,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, int try; for (try = 0; try < 4; try++) { - if (get_edid_block(data, block, j, EDID_LENGTH)) + if (read_block(context, block, j, EDID_LENGTH)) goto out; if (drm_edid_block_valid(block, j, false, NULL)) break; From 2deaf1c2561d5ec152ee2f2ce547a15b85d38b55 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:30 +0300 Subject: [PATCH 075/219] drm/edid: abstract an EDID block read helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have an abstraction for the EDID base block read, yet duplicating the retries and error handling for extension block reads. Introduce a more generic EDID block read helper. Switch to the helper piecemeal, starting with drm_edid_get_panel_id(), which doesn't need or have access to the connector anyway. The subtle change is switching from drm_edid_block_valid() to edid_block_check(). We also status print once, not for every attempt. 
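For illustration, a read_block_fn is expected to fill the caller's buffer with the requested block and return 0 on success; a hypothetical callback backed by a flat in-memory EDID (no i2c involved, all names invented here) could look like:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define SKETCH_EDID_LENGTH 128

struct flat_edid {
	const u8 *data;
	unsigned int num_blocks;
};

/* Hypothetical read_block_fn: serve block 'block' from an in-memory EDID. */
static int flat_edid_read_block(void *context, u8 *buf, unsigned int block,
				size_t len)
{
	struct flat_edid *fe = context;

	if (block >= fe->num_blocks || len != SKETCH_EDID_LENGTH)
		return -EINVAL;

	memcpy(buf, fe->data + (size_t)block * SKETCH_EDID_LENGTH, len);
	return 0;
}

A callback like this would then be passed to the block read helper together with its context pointer, with the helper owning the retries and the status reporting.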
Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/47560f7530e4a7b32b56cb9038178244fe30a4af.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 58 +++++++++++++++++++++++++++++++++----- 1 file changed, 51 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 1394750afc9d..11276d226b37 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1670,6 +1670,7 @@ EXPORT_SYMBOL(drm_edid_are_equal); enum edid_block_status { EDID_BLOCK_OK = 0, + EDID_BLOCK_READ_FAIL, EDID_BLOCK_NULL, EDID_BLOCK_ZERO, EDID_BLOCK_HEADER_CORRUPT, @@ -1736,6 +1737,9 @@ static void edid_block_status_print(enum edid_block_status status, switch (status) { case EDID_BLOCK_OK: break; + case EDID_BLOCK_READ_FAIL: + pr_debug("EDID block %d read failed\n", block_num); + break; case EDID_BLOCK_NULL: pr_debug("EDID block %d pointer is NULL\n", block_num); break; @@ -2039,6 +2043,39 @@ EXPORT_SYMBOL(drm_add_override_edid_modes); typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len); +static enum edid_block_status edid_block_read(void *block, unsigned int block_num, + read_block_fn read_block, + void *context) +{ + enum edid_block_status status; + bool is_base_block = block_num == 0; + int try; + + for (try = 0; try < 4; try++) { + if (read_block(context, block, block_num, EDID_LENGTH)) + return EDID_BLOCK_READ_FAIL; + + status = edid_block_check(block, is_base_block); + if (status == EDID_BLOCK_HEADER_REPAIR) { + edid_header_fix(block); + + /* Retry with fixed header, update status if that worked. */ + status = edid_block_check(block, is_base_block); + if (status == EDID_BLOCK_OK) + status = EDID_BLOCK_HEADER_FIXED; + } + + if (edid_block_status_valid(status, edid_block_tag(block))) + break; + + /* Fail early for unrepairable base block all zeros. */ + if (try == 0 && is_base_block && status == EDID_BLOCK_ZERO) + break; + } + + return status; +} + static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector, read_block_fn read_block, void *context) @@ -2237,20 +2274,27 @@ static u32 edid_extract_panel_id(const struct edid *edid) u32 drm_edid_get_panel_id(struct i2c_adapter *adapter) { - const struct edid *edid; - u32 panel_id; - - edid = drm_do_get_edid_base_block(NULL, drm_do_probe_ddc_edid, adapter); + enum edid_block_status status; + void *base_block; + u32 panel_id = 0; /* * There are no manufacturer IDs of 0, so if there is a problem reading * the EDID then we'll just return 0. */ - if (!edid) + + base_block = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!base_block) return 0; - panel_id = edid_extract_panel_id(edid); - kfree(edid); + status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter); + + edid_block_status_print(status, base_block, 0); + + if (edid_block_status_valid(status, edid_block_tag(base_block))) + panel_id = edid_extract_panel_id(base_block); + + kfree(base_block); return panel_id; } From c12561ce43358ce59fc0f2275fc1853e24980908 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:31 +0300 Subject: [PATCH 076/219] drm/edid: use EDID block read helper in drm_do_get_edid() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert drm_do_get_edit() from the base block read helper to the generic block read helper. There's quite a bit going on here, as the corrupt and null EDID information is moved back to the caller. 
As we see, they were not all that clear to begin with, and this change underlines that. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/3bcf98453770757ee93386da0cfbc6552d42a312.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 62 ++++++++++++++------------------------ 1 file changed, 23 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 11276d226b37..d3951ac44f8f 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2076,44 +2076,6 @@ static enum edid_block_status edid_block_read(void *block, unsigned int block_nu return status; } -static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector, - read_block_fn read_block, - void *context) -{ - int *null_edid_counter = connector ? &connector->null_edid_counter : NULL; - bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL; - void *edid; - int try; - - edid = kmalloc(EDID_LENGTH, GFP_KERNEL); - if (edid == NULL) - return NULL; - - /* base block fetch */ - for (try = 0; try < 4; try++) { - if (read_block(context, edid, 0, EDID_LENGTH)) - goto out; - if (drm_edid_block_valid(edid, 0, false, edid_corrupt)) - break; - if (try == 0 && edid_block_is_zero(edid)) { - if (null_edid_counter) - (*null_edid_counter)++; - goto carp; - } - } - if (try == 4) - goto carp; - - return edid; - -carp: - if (connector) - connector_bad_edid(connector, edid, 1); -out: - kfree(edid); - return NULL; -} - /** * drm_do_get_edid - get EDID data using a custom EDID block read function * @connector: connector we're probing @@ -2138,6 +2100,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, read_block_fn read_block, void *context) { + enum edid_block_status status; int j, invalid_blocks = 0; struct edid *edid, *new, *override; @@ -2145,10 +2108,31 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, if (override) return override; - edid = drm_do_get_edid_base_block(connector, read_block, context); + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); if (!edid) return NULL; + status = edid_block_read(edid, 0, read_block, context); + + edid_block_status_print(status, edid, 0); + + if (status == EDID_BLOCK_READ_FAIL) + goto out; + + /* FIXME: Clarify what a corrupt EDID actually means. */ + if (status == EDID_BLOCK_OK || status == EDID_BLOCK_VERSION) + connector->edid_corrupt = false; + else + connector->edid_corrupt = true; + + if (!edid_block_status_valid(status, edid_block_tag(edid))) { + if (status == EDID_BLOCK_ZERO) + connector->null_edid_counter++; + + connector_bad_edid(connector, edid, 1); + goto out; + } + if (edid->extensions == 0) return edid; From d3da3f4072c82e5be5aaafd6168c8effdea1be19 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:32 +0300 Subject: [PATCH 077/219] drm/edid: convert extension block read to EDID block read helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the EDID block read helper also for extension block reads, making edid_block_read() the only place with the read retry logic. Note: We observe that drm_do_get_edid() does not use invalid extension blocks to flag the EDID as corrupt. 
Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/a6328b898db40235b85ad4635374bc0768b5a970.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index d3951ac44f8f..202b54f95576 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2143,17 +2143,16 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, for (j = 1; j <= edid->extensions; j++) { void *block = edid + j; - int try; - for (try = 0; try < 4; try++) { - if (read_block(context, block, j, EDID_LENGTH)) + status = edid_block_read(block, j, read_block, context); + + edid_block_status_print(status, block, j); + + if (!edid_block_status_valid(status, edid_block_tag(block))) { + if (status == EDID_BLOCK_READ_FAIL) goto out; - if (drm_edid_block_valid(block, j, false, NULL)) - break; - } - - if (try == 4) invalid_blocks++; + } } if (invalid_blocks) { From b3eb97b66d4f2f919f5dd683c8ea62058fde1a93 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:33 +0300 Subject: [PATCH 078/219] drm/edid: drop extra local var MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need override as a variable for anything. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/4d249173b34758e1d6c4a74eb98518d180f0a8ae.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 202b54f95576..c50b2768d5d1 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2102,11 +2102,11 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { enum edid_block_status status; int j, invalid_blocks = 0; - struct edid *edid, *new, *override; + struct edid *edid, *new; - override = drm_get_override_edid(connector); - if (override) - return override; + edid = drm_get_override_edid(connector); + if (edid) + return edid; edid = kmalloc(EDID_LENGTH, GFP_KERNEL); if (!edid) From 1c788f69f2643f38e7fdf0ba6c8a88bc0b3aefd7 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:34 +0300 Subject: [PATCH 079/219] drm/edid: add single point of return to drm_do_get_edid() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will be useful in the future. Use fail label for fail exit. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/8e564e9415baa4dc9dc3127e4200b2618a8a3ba0.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c50b2768d5d1..d2e1c101575d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2106,7 +2106,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, edid = drm_get_override_edid(connector); if (edid) - return edid; + goto ok; edid = kmalloc(EDID_LENGTH, GFP_KERNEL); if (!edid) @@ -2117,7 +2117,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, edid_block_status_print(status, edid, 0); if (status == EDID_BLOCK_READ_FAIL) - goto out; + goto fail; /* FIXME: Clarify what a corrupt EDID actually means. 
*/ if (status == EDID_BLOCK_OK || status == EDID_BLOCK_VERSION) @@ -2130,15 +2130,15 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, connector->null_edid_counter++; connector_bad_edid(connector, edid, 1); - goto out; + goto fail; } if (edid->extensions == 0) - return edid; + goto ok; new = krealloc(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) - goto out; + goto fail; edid = new; for (j = 1; j <= edid->extensions; j++) { @@ -2150,7 +2150,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, if (!edid_block_status_valid(status, edid_block_tag(block))) { if (status == EDID_BLOCK_READ_FAIL) - goto out; + goto fail; invalid_blocks++; } } @@ -2161,9 +2161,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, edid = edid_filter_invalid_blocks(edid, invalid_blocks); } +ok: return edid; -out: +fail: kfree(edid); return NULL; } From f1e4c916f97f6adc0848515d269b3899661873ce Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 11 Apr 2022 17:00:35 +0300 Subject: [PATCH 080/219] drm/edid: add EDID block count and size helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add some helpers to figure out the EDID extension block count, block count, size, pointers to blocks. Unfortunately, we'll need to cast away the const in a few places where we actually need to access the data. v3: fix (!edid_extension_block_count(edid) == 0) (kernel test robot) v2: fix s/j/i/ introduced in a rebase Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/dc7b0850293d837439fb3914c8a9d81e39018b4b.1649685475.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 80 +++++++++++++++++++++++++++----------- 1 file changed, 57 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index d2e1c101575d..324ce8467915 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1568,6 +1568,38 @@ static const struct drm_display_mode edid_4k_modes[] = { /*** DDC fetch and block validation ***/ +static int edid_extension_block_count(const struct edid *edid) +{ + return edid->extensions; +} + +static int edid_block_count(const struct edid *edid) +{ + return edid_extension_block_count(edid) + 1; +} + +static int edid_size_by_blocks(int num_blocks) +{ + return num_blocks * EDID_LENGTH; +} + +static int edid_size(const struct edid *edid) +{ + return edid_size_by_blocks(edid_block_count(edid)); +} + +static const void *edid_block_data(const struct edid *edid, int index) +{ + BUILD_BUG_ON(sizeof(*edid) != EDID_LENGTH); + + return edid + index; +} + +static const void *edid_extension_block_data(const struct edid *edid, int index) +{ + return edid_block_data(edid, index + 1); +} + static const u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; @@ -1654,8 +1686,8 @@ bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2) return false; if (edid1) { - edid1_len = EDID_LENGTH * (1 + edid1->extensions); - edid2_len = EDID_LENGTH * (1 + edid2->extensions); + edid1_len = edid_size(edid1); + edid2_len = edid_size(edid2); if (edid1_len != edid2_len) return false; @@ -1865,14 +1897,16 @@ EXPORT_SYMBOL(drm_edid_block_valid); bool drm_edid_is_valid(struct edid *edid) { int i; - u8 *raw = (u8 *)edid; if (!edid) return false; - for (i = 0; i <= edid->extensions; i++) - if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL)) + for (i = 0; i < edid_block_count(edid); i++) { + void *block 
= (void *)edid_block_data(edid, i); + + if (!drm_edid_block_valid(block, i, true, NULL)) return false; + } return true; } @@ -1885,13 +1919,13 @@ static struct edid *edid_filter_invalid_blocks(const struct edid *edid, int valid_extensions = edid->extensions - invalid_blocks; int i; - new = kmalloc_array(valid_extensions + 1, EDID_LENGTH, GFP_KERNEL); + new = kmalloc(edid_size_by_blocks(valid_extensions + 1), GFP_KERNEL); if (!new) goto out; dest_block = new; - for (i = 0; i <= edid->extensions; i++) { - const void *block = edid + i; + for (i = 0; i < edid_block_count(edid); i++) { + const void *block = edid_block_data(edid, i); if (edid_block_valid(block, i == 0)) memcpy(dest_block++, block, EDID_LENGTH); @@ -2101,7 +2135,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, void *context) { enum edid_block_status status; - int j, invalid_blocks = 0; + int i, invalid_blocks = 0; struct edid *edid, *new; edid = drm_get_override_edid(connector); @@ -2133,20 +2167,20 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, goto fail; } - if (edid->extensions == 0) + if (!edid_extension_block_count(edid)) goto ok; - new = krealloc(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL); + new = krealloc(edid, edid_size(edid), GFP_KERNEL); if (!new) goto fail; edid = new; - for (j = 1; j <= edid->extensions; j++) { - void *block = edid + j; + for (i = 1; i < edid_block_count(edid); i++) { + void *block = (void *)edid_block_data(edid, i); - status = edid_block_read(block, j, read_block, context); + status = edid_block_read(block, i, read_block, context); - edid_block_status_print(status, block, j); + edid_block_status_print(status, block, i); if (!edid_block_status_valid(status, edid_block_tag(block))) { if (status == EDID_BLOCK_READ_FAIL) @@ -2156,7 +2190,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, } if (invalid_blocks) { - connector_bad_edid(connector, edid, edid->extensions + 1); + connector_bad_edid(connector, edid, edid_block_count(edid)); edid = edid_filter_invalid_blocks(edid, invalid_blocks); } @@ -2321,7 +2355,7 @@ EXPORT_SYMBOL(drm_get_edid_switcheroo); */ struct edid *drm_edid_duplicate(const struct edid *edid) { - return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL); + return kmemdup(edid, edid_size(edid), GFP_KERNEL); } EXPORT_SYMBOL(drm_edid_duplicate); @@ -2505,8 +2539,8 @@ drm_for_each_detailed_block(const struct edid *edid, detailed_cb *cb, void *clos for (i = 0; i < EDID_DETAILED_TIMINGS; i++) cb(&(edid->detailed_timings[i]), closure); - for (i = 1; i <= edid->extensions; i++) { - const u8 *ext = (const u8 *)edid + (i * EDID_LENGTH); + for (i = 0; i < edid_extension_block_count(edid); i++) { + const u8 *ext = edid_extension_block_data(edid, i); switch (*ext) { case CEA_EXT: @@ -3476,17 +3510,17 @@ const u8 *drm_find_edid_extension(const struct edid *edid, int i; /* No EDID or EDID extensions */ - if (edid == NULL || edid->extensions == 0) + if (!edid || !edid_extension_block_count(edid)) return NULL; /* Find CEA extension */ - for (i = *ext_index; i < edid->extensions; i++) { - edid_ext = (const u8 *)edid + EDID_LENGTH * (i + 1); + for (i = *ext_index; i < edid_extension_block_count(edid); i++) { + edid_ext = edid_extension_block_data(edid, i); if (edid_block_tag(edid_ext) == ext_id) break; } - if (i >= edid->extensions) + if (i >= edid_extension_block_count(edid)) return NULL; *ext_index = i + 1; From d8bb92e70a434584f5b8a882eb46930cc22fd45a Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 11 Apr 2022 16:25:39 
+0300 Subject: [PATCH 081/219] drm/dp: Factor out a function to probe a DPCD address Factor out from drm_dp_dpcd_read() a function to probe a DPCD address with a 1-byte read access. This will be needed by the next patch doing a read from an LTTPR address, which must happen without the preceding wake-up read in drm_dp_dpcd_read(). While at it add tracing for the 1 byte read even if the read was successful. v2: Add a probe function instead of exporting drm_dp_dpcd_access(). (Jani) v3: Add tracing for the 1-byte read even if the read was successful. (Khaled) Cc: Jani Nikula Cc: Khaled Almahallawy Cc: dri-devel@lists.freedesktop.org Signed-off-by: Imre Deak Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220411132539.984647-1-imre.deak@intel.com --- drivers/gpu/drm/dp/drm_dp.c | 33 ++++++++++++++++++++++++++++----- include/drm/dp/drm_dp_helper.h | 1 + 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/dp/drm_dp.c b/drivers/gpu/drm/dp/drm_dp.c index 703972ae14c6..b76260ea11ce 100644 --- a/drivers/gpu/drm/dp/drm_dp.c +++ b/drivers/gpu/drm/dp/drm_dp.c @@ -527,6 +527,31 @@ unlock: return ret; } +/** + * drm_dp_dpcd_probe() - probe a given DPCD address with a 1-byte read access + * @aux: DisplayPort AUX channel (SST) + * @offset: address of the register to probe + * + * Probe the provided DPCD address by reading 1 byte from it. The function can + * be used to trigger some side-effect the read access has, like waking up the + * sink, without the need for the read-out value. + * + * Returns 0 if the read access suceeded, or a negative error code on failure. + */ +int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset) +{ + u8 buffer; + int ret; + + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, 1); + WARN_ON(ret == 0); + + drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, ret); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL(drm_dp_dpcd_probe); + /** * drm_dp_dpcd_read() - read a series of bytes from the DPCD * @aux: DisplayPort AUX channel (SST or MST) @@ -559,10 +584,9 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, * monitor doesn't power down exactly after the throw away read. */ if (!aux->is_remote) { - ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, - buffer, 1); - if (ret != 1) - goto out; + ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV); + if (ret < 0) + return ret; } if (aux->is_remote) @@ -571,7 +595,6 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer, size); -out: drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret); return ret; } diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h index 51e02cf75277..53d1e722f4de 100644 --- a/include/drm/dp/drm_dp_helper.h +++ b/include/drm/dp/drm_dp_helper.h @@ -2053,6 +2053,7 @@ struct drm_dp_aux { bool is_remote; }; +int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset); ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size); ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, From 16a54d4ee7c8aa1c4b59a91bea24799f48bcc5b5 Mon Sep 17 00:00:00 2001 From: Yang Guang Date: Tue, 30 Nov 2021 08:05:08 +0800 Subject: [PATCH 082/219] fbcon: replace snprintf in show functions with sysfs_emit Use sysfs_emit instead of scnprintf or sprintf. 
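For reference, the conversion in a sysfs show callback is a one-for-one swap; a hypothetical attribute reporting a single integer would read:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical show callback; 'value' stands in for real driver state. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int value = 42;

	/* sysfs_emit() enforces the PAGE_SIZE bound itself. */
	return sysfs_emit(buf, "%d\n", value);
}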
Reported-by: Zeal Robot Signed-off-by: Yang Guang Signed-off-by: Helge Deller Link: https://patchwork.freedesktop.org/patch/msgid/0cb7ca73d9cd7162988a22a24cd18bbcd3d8bb27.1638156341.git.yang.guang5@zte.com.cn --- drivers/video/fbdev/core/fbcon.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 34744a16d41b..c4e91715ef00 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -3185,7 +3185,7 @@ static ssize_t show_rotate(struct device *device, rotate = fbcon_get_rotate(info); err: console_unlock(); - return snprintf(buf, PAGE_SIZE, "%d\n", rotate); + return sysfs_emit(buf, "%d\n", rotate); } static ssize_t show_cursor_blink(struct device *device, @@ -3210,7 +3210,7 @@ static ssize_t show_cursor_blink(struct device *device, blink = delayed_work_pending(&ops->cursor_work); err: console_unlock(); - return snprintf(buf, PAGE_SIZE, "%d\n", blink); + return sysfs_emit(buf, "%d\n", blink); } static ssize_t store_cursor_blink(struct device *device, From d6cd978f7e6b6f6895f8d0c4ce6e5d2c8e979afe Mon Sep 17 00:00:00 2001 From: Zhouyi Zhou Date: Thu, 10 Feb 2022 14:58:24 +0800 Subject: [PATCH 083/219] video: fbdev: fbmem: fix pointer reference to null device field In function do_remove_conflicting_framebuffers, if device is NULL, there will be null pointer reference. The patch add a check to the if expression. Signed-off-by: Zhouyi Zhou Acked-by: Paul Menzel Signed-off-by: Helge Deller Link: https://patchwork.freedesktop.org/patch/msgid/20220210065824.368355-1-zhouzhouyi@gmail.com --- drivers/video/fbdev/core/fbmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index bdd00d381bbc..1cfb2bd090b6 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1579,7 +1579,7 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, * If it's not a platform device, at least print a warning. A * fix would add code to remove the device from the system. */ - if (dev_is_platform(device)) { + if (device && dev_is_platform(device)) { registered_fb[i]->forced_out = true; platform_device_unregister(to_platform_device(device)); } else { From cabfa2bbe617ddf0a0cc4d01f72b584dae4939ad Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Thu, 14 Apr 2022 22:17:18 +0200 Subject: [PATCH 084/219] Revert "video: fbdev: fbmem: fix pointer reference to null device field" This reverts commit d6cd978f7e6b6f6895f8d0c4ce6e5d2c8e979afe. It has been solved differently already. Signed-off-by: Helge Deller Acked-by: Paul Menzel Link: https://patchwork.freedesktop.org/patch/msgid/20220210065824.368355-1-zhouzhouyi@gmail.com --- drivers/video/fbdev/core/fbmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 1cfb2bd090b6..bdd00d381bbc 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1579,7 +1579,7 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, * If it's not a platform device, at least print a warning. A * fix would add code to remove the device from the system. 
*/ - if (device && dev_is_platform(device)) { + if (dev_is_platform(device)) { registered_fb[i]->forced_out = true; platform_device_unregister(to_platform_device(device)); } else { From f6e0a6b09edc61d567ccf6199b3bb6f3c5f9e077 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 18 Apr 2022 10:18:42 -0400 Subject: [PATCH 085/219] drm/nouveau: change base917c_format from global to static Smatch reports this issue base917c.c:26:1: warning: symbol 'base917c_format' was not declared. Should it be static? base917c_format is only used in base917.c. Single file variables should not be global so change base917c_format's storage-class specifier to static. Signed-off-by: Tom Rix Reviewed-by: Lyude Paul Signed-off-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20220418141842.296386-1-trix@redhat.com --- drivers/gpu/drm/nouveau/dispnv50/base917c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c index a1baed4fe0e9..ca260509a4f1 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base917c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c @@ -22,7 +22,7 @@ #include "base.h" #include "atom.h" -const u32 +static const u32 base917c_format[] = { DRM_FORMAT_C8, DRM_FORMAT_XRGB8888, From c6ed9f66eb70aeaac9998bd3552ada740d90e20c Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 18 Apr 2022 11:28:10 -0400 Subject: [PATCH 086/219] drm/nouveau/gr/gf100-: change gf108_gr_fwif from global to static Smatch reports this issue gf108.c:147:1: warning: symbol 'gf108_gr_fwif' was not declared. Should it be static? gf108_gr_fwif is only used in gf108.c. Single file variables should not be global so change gf108_gr_fwif's storage-class specifier to static. Signed-off-by: Tom Rix Reviewed-by: Lyude Paul Signed-off-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20220418152810.3280502-1-trix@redhat.com --- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c index 030640bb3dca..ab3760e804b8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c @@ -143,7 +143,7 @@ gf108_gr = { } }; -const struct gf100_gr_fwif +static const struct gf100_gr_fwif gf108_gr_fwif[] = { { -1, gf100_gr_load, &gf108_gr }, { -1, gf100_gr_nofw, &gf108_gr }, From c9b2d923befd8cee0b9d6cbd96128ec0bf44c881 Mon Sep 17 00:00:00 2001 From: Devarsh Thakkar Date: Mon, 14 Mar 2022 17:07:39 +0530 Subject: [PATCH 087/219] drm/tidss: Soft Reset DISPC on startup Soft reset the display subsystem controller on startup and wait for the reset to complete. This helps the scenario where display was already in use by some other core before the linux was booted. 
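The reset itself is the usual self-clearing-bit-plus-poll handshake; a minimal standalone sketch of that pattern (placeholder offsets and bit, not the DSS register layout) looks like:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define SKETCH_SYSCONFIG	0x04	/* placeholder offsets */
#define SKETCH_SYSSTATUS	0x08
#define SKETCH_RESET_DONE	BIT(0)

/* Request a soft reset, then poll up to 5 ms for the done bit. */
static int sketch_soft_reset(void __iomem *base)
{
	u32 val;

	writel(readl(base + SKETCH_SYSCONFIG) | BIT(0),
	       base + SKETCH_SYSCONFIG);

	return readl_poll_timeout(base + SKETCH_SYSSTATUS, val,
				  val & SKETCH_RESET_DONE, 100, 5000);
}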
Signed-off-by: Devarsh Thakkar Reviewed-by: Tomi Valkeinen Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20220314113739.18000-1-devarsht@ti.com --- drivers/gpu/drm/tidss/tidss_dispc.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 60b92df615aa..dae47853b728 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2650,6 +2650,20 @@ static void dispc_init_errata(struct dispc_device *dispc) } } +static void dispc_softreset(struct dispc_device *dispc) +{ + u32 val; + int ret = 0; + + /* Soft reset */ + REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); + /* Wait for reset to complete */ + ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS, + val, val & 1, 100, 5000); + if (ret) + dev_warn(dispc->dev, "failed to reset dispc\n"); +} + int dispc_init(struct tidss_device *tidss) { struct device *dev = tidss->dev; @@ -2709,6 +2723,10 @@ int dispc_init(struct tidss_device *tidss) return r; } + /* K2G display controller does not support soft reset */ + if (feat->subrev != DISPC_K2G) + dispc_softreset(dispc); + for (i = 0; i < dispc->feat->num_vps; i++) { u32 gamma_size = dispc->feat->vp_feat.color.gamma_size; u32 *gamma_table; From 554ae8dce1268789e72767a67f0635cb743b3cea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Thu, 14 Apr 2022 08:11:17 -0700 Subject: [PATCH 088/219] drm/i915/display/psr: Unset enable_psr2_sel_fetch if other checks in intel_psr2_config_valid() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If any of the PSR2 checks after intel_psr2_sel_fetch_config_valid() fails, enable_psr2_sel_fetch will be kept enabled causing problems in the functions that only checks for it and not for has_psr2. So here moving the check that do not depend on enable_psr2_sel_fetch and for the remaning ones jumping to a section that unset enable_psr2_sel_fetch in case of failure to support PSR2. 
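Reduced to a sketch with invented names (this is not the i915 code itself), the shape of the change is: checks that never touch the selective-fetch state may still return early, while everything after that state is set unwinds through a single label:

#include <linux/types.h>

struct sketch_cfg {
	bool sel_fetch_enabled;
};

/* Stand-ins for the real checks; they always "pass" in this sketch. */
static bool sketch_early_check(const struct sketch_cfg *cfg) { return true; }
static bool sketch_try_sel_fetch(struct sketch_cfg *cfg)     { return true; }
static bool sketch_late_check(const struct sketch_cfg *cfg)  { return true; }

static bool sketch_config_valid(struct sketch_cfg *cfg)
{
	/* Checks that do not depend on the selective-fetch state. */
	if (!sketch_early_check(cfg))
		return false;

	cfg->sel_fetch_enabled = sketch_try_sel_fetch(cfg);

	/* Anything failing from here on must also clear that state. */
	if (!sketch_late_check(cfg))
		goto unsupported;

	return true;

unsupported:
	cfg->sel_fetch_enabled = false;
	return false;
}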
Fixes: 6e43e276b8c9 ("drm/i915: Initial implementation of PSR2 selective fetch") Cc: Jouni Högander Reviewed-by: Jouni Högander Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220414151118.21980-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 38 +++++++++++++----------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 5a55010a9b2f..8ec7c161284b 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -891,6 +891,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + /* Wa_16011303918:adl-p */ + if (crtc_state->vrr.enable && + IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, not compatible with HW stepping + VRR\n"); + return false; + } + + if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); + return false; + } + if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { @@ -904,12 +918,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (!crtc_state->enable_psr2_sel_fetch && IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n"); - return false; + goto unsupported; } if (!psr2_granularity_check(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n"); - return false; + goto unsupported; } if (!crtc_state->enable_psr2_sel_fetch && @@ -918,25 +932,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", crtc_hdisplay, crtc_vdisplay, psr_max_h, psr_max_v); - return false; - } - - if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); - return false; - } - - /* Wa_16011303918:adl-p */ - if (crtc_state->vrr.enable && - IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled, not compatible with HW stepping + VRR\n"); - return false; + goto unsupported; } tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); return true; + +unsupported: + crtc_state->enable_psr2_sel_fetch = false; + return false; } void intel_psr_compute_config(struct intel_dp *intel_dp, From c837e027436df69d20474bd3fdea2c6b3971aa6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Thu, 14 Apr 2022 08:11:18 -0700 Subject: [PATCH 089/219] drm/i915/display/psr: Clear more PSR state during disable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After commit 805f04d42a6b ("drm/i915/display/psr: Use continuos full frame to handle frontbuffer invalidations") was merged we started to get some drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)) in tests that are executed in pipe B. This is probably due psr2_sel_fetch_cff_enabled being left set during PSR disable in the pipe A, so the PSR2_MAN_TRK_CTL write in intel_psr2_program_trans_man_trk_ctl() is skipped in pipe B and then we get the warning when actually enabling PSR after planes programing. 
We don't get such warnings when running tests in pipe A because PSR2_MAN_TRK_CTL is only cleared when enabling PSR2 with hardware tracking. Was not able to reproduce this issue but cleaning the PSR state disable will not harm anything at all. Fixes: 805f04d42a6b ("drm/i915/display/psr: Use continuos full frame to handle frontbuffer invalidations") Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5634 Cc: Jouni Högander Signed-off-by: José Roberto de Souza Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414151118.21980-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 8ec7c161284b..06db407e2749 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1353,6 +1353,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); intel_dp->psr.enabled = false; + intel_dp->psr.psr2_enabled = false; + intel_dp->psr.psr2_sel_fetch_enabled = false; + intel_dp->psr.psr2_sel_fetch_cff_enabled = false; } /** From 97f2c684f34d386639926787fe5211d42b6f4e6f Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Thu, 14 Apr 2022 17:00:04 +0800 Subject: [PATCH 090/219] drm/bridge: anx7625: Fill in empty ELD when no connector Speaker may share I2S with DP and .get_eld callback will be called when speaker is playing. When HDMI wans't connected, the connector will be null. Instead of return an error, fill in empty ELD. Signed-off-by: Hsin-Yi Wang Reviewed-by: Xin Ji Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220414090003.1806535-1-hsinyi@chromium.org --- drivers/gpu/drm/bridge/analogix/anx7625.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 6516f9570b86..f2bc30c98c77 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1932,14 +1932,14 @@ static int anx7625_audio_get_eld(struct device *dev, void *data, struct anx7625_data *ctx = dev_get_drvdata(dev); if (!ctx->connector) { - dev_err(dev, "connector not initial\n"); - return -EINVAL; + /* Pass en empty ELD if connector not available */ + memset(buf, 0, len); + } else { + dev_dbg(dev, "audio copy eld\n"); + memcpy(buf, ctx->connector->eld, + min(sizeof(ctx->connector->eld), len)); } - dev_dbg(dev, "audio copy eld\n"); - memcpy(buf, ctx->connector->eld, - min(sizeof(ctx->connector->eld), len)); - return 0; } From 501f94d09b7df2d121a626590e824d6ae6e6e095 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 19 Apr 2022 16:39:57 +0200 Subject: [PATCH 091/219] dt-bindings: display: bridge: lt9211: Add Lontium LT9211 bridge driver Add bindings for Lontium LT9211 Single/Dual-Link DSI/LVDS or Single DPI to Single-link/Dual-Link DSI/LVDS or Single DPI bridge. This chip is highly capable at converting formats, but sadly it is also highly undocumented. 
Reviewed-by: Rob Herring Signed-off-by: Marek Vasut Cc: Laurent Pinchart Cc: Lucas Stach Cc: Maxime Ripard Cc: Rob Herring Cc: Robert Foss Cc: Sam Ravnborg Cc: Thomas Zimmermann Cc: devicetree@vger.kernel.org To: dri-devel@lists.freedesktop.org Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220419143958.94873-1-marex@denx.de --- .../display/bridge/lontium,lt9211.yaml | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 Documentation/devicetree/bindings/display/bridge/lontium,lt9211.yaml diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt9211.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt9211.yaml new file mode 100644 index 000000000000..9a6e9b25d14a --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt9211.yaml @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/lontium,lt9211.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Lontium LT9211 DSI/LVDS/DPI to DSI/LVDS/DPI bridge. + +maintainers: + - Marek Vasut + +description: | + The LT9211 are bridge devices which convert Single/Dual-Link DSI/LVDS + or Single DPI to Single/Dual-Link DSI/LVDS or Single DPI. + +properties: + compatible: + enum: + - lontium,lt9211 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + reset-gpios: + maxItems: 1 + description: GPIO connected to active high RESET pin. + + vccio-supply: + description: Regulator for 1.8V IO power. + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: + Primary MIPI DSI port-1 for MIPI input or + LVDS port-1 for LVDS input or DPI input. + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + Additional MIPI port-2 for MIPI input or LVDS port-2 + for LVDS input. Used in combination with primary + port-1 to drive higher resolution displays + + port@2: + $ref: /schemas/graph.yaml#/properties/port + description: + Primary MIPI DSI port-1 for MIPI output or + LVDS port-1 for LVDS output or DPI output. + + port@3: + $ref: /schemas/graph.yaml#/properties/port + description: + Additional MIPI port-2 for MIPI output or LVDS port-2 + for LVDS output. Used in combination with primary + port-1 to drive higher resolution displays. + + required: + - port@0 + - port@2 + +required: + - compatible + - reg + - vccio-supply + - ports + +additionalProperties: false + +examples: + - | + #include + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + hdmi-bridge@3b { + compatible = "lontium,lt9211"; + reg = <0x3b>; + + reset-gpios = <&tlmm 128 GPIO_ACTIVE_HIGH>; + interrupts-extended = <&tlmm 84 IRQ_TYPE_EDGE_FALLING>; + + vccio-supply = <<9211_1v8>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + + port@2 { + reg = <2>; + + endpoint { + remote-endpoint = <&panel_in_lvds>; + }; + }; + }; + }; + }; + +... From 8ce4129e3de433ab924951eeb980e97e4c2cc03b Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 19 Apr 2022 16:39:58 +0200 Subject: [PATCH 092/219] drm/bridge: lt9211: Add Lontium LT9211 bridge driver Add driver for Lontium LT9211 Single/Dual-Link DSI/LVDS or Single DPI to Single-link/Dual-Link DSI/LVDS or Single DPI bridge. This chip is highly capable at converting formats, but sadly it is also highly undocumented. 
This driver is written without any documentation from Lontium and based only on shreds of information available in various obscure example codes, hence long runs of unknown register patches and lengthy delays in various places. Whichever register meaning could be divined from its behavior has at least a comment around it. Currently the only mode tested is Single-link DSI to Single-link LVDS. Dual-link LVDS might work as well, the register programming is in place, but is untested. Reviewed-by: Robert Foss Signed-off-by: Marek Vasut Cc: Laurent Pinchart Cc: Lucas Stach Cc: Maxime Ripard Cc: Robert Foss Cc: Sam Ravnborg Cc: Thomas Zimmermann To: dri-devel@lists.freedesktop.org Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220419143958.94873-2-marex@denx.de --- drivers/gpu/drm/bridge/Kconfig | 13 + drivers/gpu/drm/bridge/Makefile | 1 + drivers/gpu/drm/bridge/lontium-lt9211.c | 802 ++++++++++++++++++++++++ 3 files changed, 816 insertions(+) create mode 100644 drivers/gpu/drm/bridge/lontium-lt9211.c diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 7ac920a57d26..c08ccb4b332b 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -100,6 +100,19 @@ config DRM_LONTIUM_LT8912B Say M here if you want to support this hardware as a module. The module will be named "lontium-lt8912b". +config DRM_LONTIUM_LT9211 + tristate "Lontium LT9211 DSI/LVDS/DPI bridge" + depends on OF + select DRM_PANEL_BRIDGE + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select REGMAP_I2C + help + Driver for Lontium LT9211 Single/Dual-Link DSI/LVDS or Single DPI + input to Single-link/Dual-Link DSI/LVDS or Single DPI output bridge + chip. + Please say Y if you have such hardware. + config DRM_LONTIUM_LT9611 tristate "Lontium LT9611 DSI/HDMI bridge" select SND_SOC_HDMI_CODEC if SND_SOC diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 425844c30495..b0edf2022fa0 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o +obj-$(CONFIG_DRM_LONTIUM_LT9211) += lontium-lt9211.o obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c new file mode 100644 index 000000000000..e92821fbc639 --- /dev/null +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lontium LT9211 bridge driver + * + * LT9211 is capable of converting: + * 2xDSI/2xLVDS/1xDPI -> 2xDSI/2xLVDS/1xDPI + * Currently supported is: + * 1xDSI -> 1xLVDS + * + * Copyright (C) 2022 Marek Vasut + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define REG_PAGE_CONTROL 0xff +#define REG_CHIPID0 0x8100 +#define REG_CHIPID0_VALUE 0x18 +#define REG_CHIPID1 0x8101 +#define REG_CHIPID1_VALUE 0x01 +#define REG_CHIPID2 0x8102 +#define REG_CHIPID2_VALUE 0xe3 + +#define REG_DSI_LANE 0xd000 +/* DSI lane count - 0 means 4 lanes ; 1, 2, 3 means 1, 2, 3 lanes. 
*/ +#define REG_DSI_LANE_COUNT(n) ((n) & 3) + +struct lt9211 { + struct drm_bridge bridge; + struct device *dev; + struct regmap *regmap; + struct mipi_dsi_device *dsi; + struct drm_bridge *panel_bridge; + struct gpio_desc *reset_gpio; + struct regulator *vccio; + bool lvds_dual_link; + bool lvds_dual_link_even_odd_swap; +}; + +static const struct regmap_range lt9211_rw_ranges[] = { + regmap_reg_range(0xff, 0xff), + regmap_reg_range(0x8100, 0x816b), + regmap_reg_range(0x8200, 0x82aa), + regmap_reg_range(0x8500, 0x85ff), + regmap_reg_range(0x8600, 0x86a0), + regmap_reg_range(0x8700, 0x8746), + regmap_reg_range(0xd000, 0xd0a7), + regmap_reg_range(0xd400, 0xd42c), + regmap_reg_range(0xd800, 0xd838), + regmap_reg_range(0xd9c0, 0xd9d5), +}; + +static const struct regmap_access_table lt9211_rw_table = { + .yes_ranges = lt9211_rw_ranges, + .n_yes_ranges = ARRAY_SIZE(lt9211_rw_ranges), +}; + +static const struct regmap_range_cfg lt9211_range = { + .name = "lt9211", + .range_min = 0x0000, + .range_max = 0xda00, + .selector_reg = REG_PAGE_CONTROL, + .selector_mask = 0xff, + .selector_shift = 0, + .window_start = 0, + .window_len = 0x100, +}; + +static const struct regmap_config lt9211_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .rd_table = <9211_rw_table, + .wr_table = <9211_rw_table, + .volatile_table = <9211_rw_table, + .ranges = <9211_range, + .num_ranges = 1, + .cache_type = REGCACHE_RBTREE, + .max_register = 0xda00, +}; + +static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge) +{ + return container_of(bridge, struct lt9211, bridge); +} + +static int lt9211_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct lt9211 *ctx = bridge_to_lt9211(bridge); + + return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, + &ctx->bridge, flags); +} + +static int lt9211_read_chipid(struct lt9211 *ctx) +{ + u8 chipid[3]; + int ret; + + /* Read Chip ID registers and verify the chip can communicate. */ + ret = regmap_bulk_read(ctx->regmap, REG_CHIPID0, chipid, 3); + if (ret < 0) { + dev_err(ctx->dev, "Failed to read Chip ID: %d\n", ret); + return ret; + } + + /* Test for known Chip ID. */ + if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE || + chipid[2] != REG_CHIPID2_VALUE) { + dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n", + chipid[0], chipid[1], chipid[2]); + return -EINVAL; + } + + return 0; +} + +static int lt9211_system_init(struct lt9211 *ctx) +{ + const struct reg_sequence lt9211_system_init_seq[] = { + { 0x8201, 0x18 }, + { 0x8606, 0x61 }, + { 0x8607, 0xa8 }, + { 0x8714, 0x08 }, + { 0x8715, 0x00 }, + { 0x8718, 0x0f }, + { 0x8722, 0x08 }, + { 0x8723, 0x00 }, + { 0x8726, 0x0f }, + { 0x810b, 0xfe }, + }; + + return regmap_multi_reg_write(ctx->regmap, lt9211_system_init_seq, + ARRAY_SIZE(lt9211_system_init_seq)); +} + +static int lt9211_configure_rx(struct lt9211 *ctx) +{ + const struct reg_sequence lt9211_rx_phy_seq[] = { + { 0x8202, 0x44 }, + { 0x8204, 0xa0 }, + { 0x8205, 0x22 }, + { 0x8207, 0x9f }, + { 0x8208, 0xfc }, + /* ORR with 0xf8 here to enable DSI DN/DP swap. 
*/ + { 0x8209, 0x01 }, + { 0x8217, 0x0c }, + { 0x8633, 0x1b }, + }; + + const struct reg_sequence lt9211_rx_cal_reset_seq[] = { + { 0x8120, 0x7f }, + { 0x8120, 0xff }, + }; + + const struct reg_sequence lt9211_rx_dig_seq[] = { + { 0x8630, 0x85 }, + /* 0x8588: BIT 6 set = MIPI-RX, BIT 4 unset = LVDS-TX */ + { 0x8588, 0x40 }, + { 0x85ff, 0xd0 }, + { REG_DSI_LANE, REG_DSI_LANE_COUNT(ctx->dsi->lanes) }, + { 0xd002, 0x05 }, + }; + + const struct reg_sequence lt9211_rx_div_reset_seq[] = { + { 0x810a, 0xc0 }, + { 0x8120, 0xbf }, + }; + + const struct reg_sequence lt9211_rx_div_clear_seq[] = { + { 0x810a, 0xc1 }, + { 0x8120, 0xff }, + }; + + int ret; + + ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_phy_seq, + ARRAY_SIZE(lt9211_rx_phy_seq)); + if (ret) + return ret; + + ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_cal_reset_seq, + ARRAY_SIZE(lt9211_rx_cal_reset_seq)); + if (ret) + return ret; + + ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_dig_seq, + ARRAY_SIZE(lt9211_rx_dig_seq)); + if (ret) + return ret; + + ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_reset_seq, + ARRAY_SIZE(lt9211_rx_div_reset_seq)); + if (ret) + return ret; + + usleep_range(10000, 15000); + + return regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_clear_seq, + ARRAY_SIZE(lt9211_rx_div_clear_seq)); +} + +static int lt9211_autodetect_rx(struct lt9211 *ctx, + const struct drm_display_mode *mode) +{ + u16 width, height; + u32 byteclk; + u8 buf[5]; + u8 format; + u8 bc[3]; + int ret; + + /* Measure ByteClock frequency. */ + ret = regmap_write(ctx->regmap, 0x8600, 0x01); + if (ret) + return ret; + + /* Give the chip time to lock onto RX stream. */ + msleep(100); + + /* Read the ByteClock frequency from the chip. */ + ret = regmap_bulk_read(ctx->regmap, 0x8608, bc, sizeof(bc)); + if (ret) + return ret; + + /* RX ByteClock in kHz */ + byteclk = ((bc[0] & 0xf) << 16) | (bc[1] << 8) | bc[2]; + + /* Width/Height/Format Auto-detection */ + ret = regmap_bulk_read(ctx->regmap, 0xd082, buf, sizeof(buf)); + if (ret) + return ret; + + width = (buf[0] << 8) | buf[1]; + height = (buf[3] << 8) | buf[4]; + format = buf[2] & 0xf; + + if (format == 0x3) { /* YUV422 16bit */ + width /= 2; + } else if (format == 0xa) { /* RGB888 24bit */ + width /= 3; + } else { + dev_err(ctx->dev, "Unsupported DSI pixel format 0x%01x\n", + format); + return -EINVAL; + } + + if (width != mode->hdisplay) { + dev_err(ctx->dev, + "RX: Detected DSI width (%d) does not match mode hdisplay (%d)\n", + width, mode->hdisplay); + return -EINVAL; + } + + if (height != mode->vdisplay) { + dev_err(ctx->dev, + "RX: Detected DSI height (%d) does not match mode vdisplay (%d)\n", + height, mode->vdisplay); + return -EINVAL; + } + + dev_dbg(ctx->dev, "RX: %dx%d format=0x%01x byteclock=%d kHz\n", + width, height, format, byteclk); + + return 0; +} + +static int lt9211_configure_timing(struct lt9211 *ctx, + const struct drm_display_mode *mode) +{ + const struct reg_sequence lt9211_timing[] = { + { 0xd00d, (mode->vtotal >> 8) & 0xff }, + { 0xd00e, mode->vtotal & 0xff }, + { 0xd00f, (mode->vdisplay >> 8) & 0xff }, + { 0xd010, mode->vdisplay & 0xff }, + { 0xd011, (mode->htotal >> 8) & 0xff }, + { 0xd012, mode->htotal & 0xff }, + { 0xd013, (mode->hdisplay >> 8) & 0xff }, + { 0xd014, mode->hdisplay & 0xff }, + { 0xd015, (mode->vsync_end - mode->vsync_start) & 0xff }, + { 0xd016, (mode->hsync_end - mode->hsync_start) & 0xff }, + { 0xd017, ((mode->vsync_start - mode->vdisplay) >> 8) & 0xff }, + { 0xd018, (mode->vsync_start - mode->vdisplay) & 0xff }, + { 
0xd019, ((mode->hsync_start - mode->hdisplay) >> 8) & 0xff }, + { 0xd01a, (mode->hsync_start - mode->hdisplay) & 0xff }, + }; + + return regmap_multi_reg_write(ctx->regmap, lt9211_timing, + ARRAY_SIZE(lt9211_timing)); +} + +static int lt9211_configure_plls(struct lt9211 *ctx, + const struct drm_display_mode *mode) +{ + const struct reg_sequence lt9211_pcr_seq[] = { + { 0xd026, 0x17 }, + { 0xd027, 0xc3 }, + { 0xd02d, 0x30 }, + { 0xd031, 0x10 }, + { 0xd023, 0x20 }, + { 0xd038, 0x02 }, + { 0xd039, 0x10 }, + { 0xd03a, 0x20 }, + { 0xd03b, 0x60 }, + { 0xd03f, 0x04 }, + { 0xd040, 0x08 }, + { 0xd041, 0x10 }, + { 0x810b, 0xee }, + { 0x810b, 0xfe }, + }; + + unsigned int pval; + int ret; + + /* DeSSC PLL reference clock is 25 MHz XTal. */ + ret = regmap_write(ctx->regmap, 0x822d, 0x48); + if (ret) + return ret; + + if (mode->clock < 44000) { + ret = regmap_write(ctx->regmap, 0x8235, 0x83); + } else if (mode->clock < 88000) { + ret = regmap_write(ctx->regmap, 0x8235, 0x82); + } else if (mode->clock < 176000) { + ret = regmap_write(ctx->regmap, 0x8235, 0x81); + } else { + dev_err(ctx->dev, + "Unsupported mode clock (%d kHz) above 176 MHz.\n", + mode->clock); + return -EINVAL; + } + + if (ret) + return ret; + + /* Wait for the DeSSC PLL to stabilize. */ + msleep(100); + + ret = regmap_multi_reg_write(ctx->regmap, lt9211_pcr_seq, + ARRAY_SIZE(lt9211_pcr_seq)); + if (ret) + return ret; + + /* PCR stability test takes seconds. */ + ret = regmap_read_poll_timeout(ctx->regmap, 0xd087, pval, pval & 0x8, + 20000, 10000000); + if (ret) + dev_err(ctx->dev, "PCR unstable, ret=%i\n", ret); + + return ret; +} + +static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida, + bool bpp24, bool de) +{ + const struct reg_sequence system_lt9211_tx_phy_seq[] = { + /* DPI output disable */ + { 0x8262, 0x00 }, + /* BIT(7) is LVDS dual-port */ + { 0x823b, 0x38 | (ctx->lvds_dual_link ? BIT(7) : 0) }, + { 0x823e, 0x92 }, + { 0x823f, 0x48 }, + { 0x8240, 0x31 }, + { 0x8243, 0x80 }, + { 0x8244, 0x00 }, + { 0x8245, 0x00 }, + { 0x8249, 0x00 }, + { 0x824a, 0x01 }, + { 0x824e, 0x00 }, + { 0x824f, 0x00 }, + { 0x8250, 0x00 }, + { 0x8253, 0x00 }, + { 0x8254, 0x01 }, + /* LVDS channel order, Odd:Even 0x10..A:B, 0x40..B:A */ + { 0x8646, ctx->lvds_dual_link_even_odd_swap ? 0x40 : 0x10 }, + { 0x8120, 0x7b }, + { 0x816b, 0xff }, + }; + + const struct reg_sequence system_lt9211_tx_dig_seq[] = { + { 0x8559, 0x40 | (jeida ? BIT(7) : 0) | + (de ? BIT(5) : 0) | (bpp24 ? BIT(4) : 0) }, + { 0x855a, 0xaa }, + { 0x855b, 0xaa }, + { 0x855c, ctx->lvds_dual_link ? BIT(0) : 0 }, + { 0x85a1, 0x77 }, + { 0x8640, 0x40 }, + { 0x8641, 0x34 }, + { 0x8642, 0x10 }, + { 0x8643, 0x23 }, + { 0x8644, 0x41 }, + { 0x8645, 0x02 }, + }; + + const struct reg_sequence system_lt9211_tx_pll_seq[] = { + /* TX PLL power down */ + { 0x8236, 0x01 }, + { 0x8237, ctx->lvds_dual_link ? 
0x2a : 0x29 }, + { 0x8238, 0x06 }, + { 0x8239, 0x30 }, + { 0x823a, 0x8e }, + { 0x8737, 0x14 }, + { 0x8713, 0x00 }, + { 0x8713, 0x80 }, + }; + + unsigned int pval; + int ret; + + ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_phy_seq, + ARRAY_SIZE(system_lt9211_tx_phy_seq)); + if (ret) + return ret; + + ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_dig_seq, + ARRAY_SIZE(system_lt9211_tx_dig_seq)); + if (ret) + return ret; + + ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_pll_seq, + ARRAY_SIZE(system_lt9211_tx_pll_seq)); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(ctx->regmap, 0x871f, pval, pval & 0x80, + 10000, 1000000); + if (ret) { + dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret); + return ret; + } + + ret = regmap_read_poll_timeout(ctx->regmap, 0x8720, pval, pval & 0x80, + 10000, 1000000); + if (ret) { + dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret); + return ret; + } + + return 0; +} + +static void lt9211_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct lt9211 *ctx = bridge_to_lt9211(bridge); + struct drm_atomic_state *state = old_bridge_state->base.state; + const struct drm_bridge_state *bridge_state; + const struct drm_crtc_state *crtc_state; + const struct drm_display_mode *mode; + struct drm_connector *connector; + struct drm_crtc *crtc; + bool lvds_format_24bpp; + bool lvds_format_jeida; + u32 bus_flags; + int ret; + + ret = regulator_enable(ctx->vccio); + if (ret) { + dev_err(ctx->dev, "Failed to enable vccio: %d\n", ret); + return; + } + + /* Deassert reset */ + gpiod_set_value(ctx->reset_gpio, 1); + usleep_range(20000, 21000); /* Very long post-reset delay. */ + + /* Get the LVDS format from the bridge state. */ + bridge_state = drm_atomic_get_new_bridge_state(state, bridge); + bus_flags = bridge_state->output_bus_cfg.flags; + + switch (bridge_state->output_bus_cfg.format) { + case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: + lvds_format_24bpp = false; + lvds_format_jeida = true; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: + lvds_format_24bpp = true; + lvds_format_jeida = true; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: + lvds_format_24bpp = true; + lvds_format_jeida = false; + break; + default: + /* + * Some bridges still don't set the correct + * LVDS bus pixel format, use SPWG24 default + * format until those are fixed. + */ + lvds_format_24bpp = true; + lvds_format_jeida = false; + dev_warn(ctx->dev, + "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n", + bridge_state->output_bus_cfg.format); + break; + } + + /* + * Retrieve the CRTC adjusted mode. This requires a little dance to go + * from the bridge to the encoder, to the connector and to the CRTC. 
+ */ + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + mode = &crtc_state->adjusted_mode; + + ret = lt9211_read_chipid(ctx); + if (ret) + return; + + ret = lt9211_system_init(ctx); + if (ret) + return; + + ret = lt9211_configure_rx(ctx); + if (ret) + return; + + ret = lt9211_autodetect_rx(ctx, mode); + if (ret) + return; + + ret = lt9211_configure_timing(ctx, mode); + if (ret) + return; + + ret = lt9211_configure_plls(ctx, mode); + if (ret) + return; + + ret = lt9211_configure_tx(ctx, lvds_format_jeida, lvds_format_24bpp, + bus_flags & DRM_BUS_FLAG_DE_HIGH); + if (ret) + return; + + dev_dbg(ctx->dev, "LT9211 enabled.\n"); +} + +static void lt9211_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct lt9211 *ctx = bridge_to_lt9211(bridge); + int ret; + + /* + * Put the chip in reset, pull nRST line low, + * and assure lengthy 10ms reset low timing. + */ + gpiod_set_value(ctx->reset_gpio, 0); + usleep_range(10000, 11000); /* Very long reset duration. */ + + ret = regulator_disable(ctx->vccio); + if (ret) + dev_err(ctx->dev, "Failed to disable vccio: %d\n", ret); + + regcache_mark_dirty(ctx->regmap); +} + +static enum drm_mode_status +lt9211_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + /* LVDS output clock range 25..176 MHz */ + if (mode->clock < 25000) + return MODE_CLOCK_LOW; + if (mode->clock > 176000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +#define MAX_INPUT_SEL_FORMATS 1 + +static u32 * +lt9211_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts; + + *num_input_fmts = 0; + + input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + /* This is the DSI-end bus format */ + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + *num_input_fmts = 1; + + return input_fmts; +} + +static const struct drm_bridge_funcs lt9211_funcs = { + .attach = lt9211_attach, + .mode_valid = lt9211_mode_valid, + .atomic_enable = lt9211_atomic_enable, + .atomic_disable = lt9211_atomic_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_get_input_bus_fmts = lt9211_atomic_get_input_bus_fmts, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +static int lt9211_parse_dt(struct lt9211 *ctx) +{ + struct device_node *port2, *port3; + struct drm_bridge *panel_bridge; + struct device *dev = ctx->dev; + struct drm_panel *panel; + int dual_link; + int ret; + + ctx->vccio = devm_regulator_get(dev, "vccio"); + if (IS_ERR(ctx->vccio)) + return dev_err_probe(dev, PTR_ERR(ctx->vccio), + "Failed to get supply 'vccio'\n"); + + ctx->lvds_dual_link = false; + ctx->lvds_dual_link_even_odd_swap = false; + + port2 = of_graph_get_port_by_id(dev->of_node, 2); + port3 = of_graph_get_port_by_id(dev->of_node, 3); + dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3); + of_node_put(port2); + of_node_put(port3); + + if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) { + ctx->lvds_dual_link = true; + /* Odd pixels to LVDS Channel A, even pixels to B */ + ctx->lvds_dual_link_even_odd_swap = 
false; + } else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) { + ctx->lvds_dual_link = true; + /* Even pixels to LVDS Channel A, odd pixels to B */ + ctx->lvds_dual_link_even_odd_swap = true; + } + + ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge); + if (ret < 0) + return ret; + if (panel) { + panel_bridge = devm_drm_panel_bridge_add(dev, panel); + if (IS_ERR(panel_bridge)) + return PTR_ERR(panel_bridge); + } + + ctx->panel_bridge = panel_bridge; + + return 0; +} + +static int lt9211_host_attach(struct lt9211 *ctx) +{ + const struct mipi_dsi_device_info info = { + .type = "lt9211", + .channel = 0, + .node = NULL, + }; + struct device *dev = ctx->dev; + struct device_node *host_node; + struct device_node *endpoint; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host *host; + int dsi_lanes; + int ret; + + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); + dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes"); + host_node = of_graph_get_remote_port_parent(endpoint); + host = of_find_mipi_dsi_host_by_node(host_node); + of_node_put(host_node); + of_node_put(endpoint); + + if (!host) + return -EPROBE_DEFER; + + if (dsi_lanes < 0 || dsi_lanes > 4) + return -EINVAL; + + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) + return dev_err_probe(dev, PTR_ERR(dsi), + "failed to create dsi device\n"); + + ctx->dsi = dsi; + + dsi->lanes = dsi_lanes; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_VIDEO_HSE; + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + dev_err(dev, "failed to attach dsi to host: %d\n", ret); + return ret; + } + + return 0; +} + +static int lt9211_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct lt9211 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + + /* + * Put the chip in reset, pull nRST line low, + * and assure lengthy 10ms reset low timing. + */ + ctx->reset_gpio = devm_gpiod_get_optional(ctx->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(ctx->reset_gpio)) + return PTR_ERR(ctx->reset_gpio); + + usleep_range(10000, 11000); /* Very long reset duration. 
*/ + + ret = lt9211_parse_dt(ctx); + if (ret) + return ret; + + ctx->regmap = devm_regmap_init_i2c(client, <9211_regmap_config); + if (IS_ERR(ctx->regmap)) + return PTR_ERR(ctx->regmap); + + dev_set_drvdata(dev, ctx); + i2c_set_clientdata(client, ctx); + + ctx->bridge.funcs = <9211_funcs; + ctx->bridge.of_node = dev->of_node; + drm_bridge_add(&ctx->bridge); + + ret = lt9211_host_attach(ctx); + if (ret) + drm_bridge_remove(&ctx->bridge); + + return ret; +} + +static int lt9211_remove(struct i2c_client *client) +{ + struct lt9211 *ctx = i2c_get_clientdata(client); + + drm_bridge_remove(&ctx->bridge); + + return 0; +} + +static struct i2c_device_id lt9211_id[] = { + { "lontium,lt9211" }, + {}, +}; +MODULE_DEVICE_TABLE(i2c, lt9211_id); + +static const struct of_device_id lt9211_match_table[] = { + { .compatible = "lontium,lt9211" }, + {}, +}; +MODULE_DEVICE_TABLE(of, lt9211_match_table); + +static struct i2c_driver lt9211_driver = { + .probe = lt9211_probe, + .remove = lt9211_remove, + .id_table = lt9211_id, + .driver = { + .name = "lt9211", + .of_match_table = lt9211_match_table, + }, +}; +module_i2c_driver(lt9211_driver); + +MODULE_AUTHOR("Marek Vasut "); +MODULE_DESCRIPTION("Lontium LT9211 DSI/LVDS/DPI bridge driver"); +MODULE_LICENSE("GPL"); From 04b19d32213654e54ec819b6ac033360f1551902 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Tue, 19 Apr 2022 15:24:53 +0100 Subject: [PATCH 093/219] drm: bridge: adv7511: Enable DRM_BRIDGE_OP_HPD based on HPD interrupt Connector detection using poll method won't work in case of bridge attached to the encoder with the flag DRM_BRIDGE_ATTACH_NO_CONNECTOR, as the code defaults to HPD. Enable DRM_BRIDGE_OP_HPD based on HPD interrupt availability, so that it will fall back to polling, if HPD is not available. Signed-off-by: Biju Das Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220419142453.48839-1-biju.das.jz@bp.renesas.com --- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 668dcefbae17..b3f10c54e064 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1292,8 +1292,10 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) goto err_unregister_cec; adv7511->bridge.funcs = &adv7511_bridge_funcs; - adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID - | DRM_BRIDGE_OP_HPD; + adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; + if (adv7511->i2c_main->irq) + adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD; + adv7511->bridge.of_node = dev->of_node; adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA; From 34263c1b1593e44a3963dcfd6ed9af70c3002686 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 19 Apr 2022 11:14:22 +0200 Subject: [PATCH 094/219] drm: bridge: panel: Register connector if DRM device is already registered If panel_bridge_attach() happens after DRM device registration, the created connector will not be registered by the DRM core anymore. Fix this by registering it explicitly in such case. 
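The gist of that fix, as a hedged sketch (the helper name below is invented for illustration only; the real change is the small hunk in the diff further down):

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>

/* Illustrative sketch, not the literal hunk from this patch. */
static int panel_bridge_register_connector(struct drm_bridge *bridge,
					   struct drm_connector *connector)
{
	/* Before drm_dev_register(), the DRM core registers connectors itself. */
	if (!bridge->dev->registered)
		return 0;

	/* Late attach: reset the connector state and register it explicitly. */
	if (connector->funcs->reset)
		connector->funcs->reset(connector);

	return drm_connector_register(connector);
}
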
This fixes the following issue observed on Samsung Exynos4210-based Trats board with a DSI panel (the panel driver is registered after the Exynos DRM component device is bound): $ ./modetest -c -Mexynos could not get connector 56: No such file or directory Segmentation fault While touching this, move the connector reset() call also under the DRM device registered check, because otherwise it is not really needed. Fixes: 934aef885f9d ("drm: bridge: panel: Reset the connector state pointer") Signed-off-by: Marek Szyprowski Reviewed-by: Jagan Teki Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220419091422.4255-1-m.szyprowski@samsung.com --- drivers/gpu/drm/bridge/panel.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index ff1c37b2e6e5..0ee563eb2b6f 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -83,8 +83,11 @@ static int panel_bridge_attach(struct drm_bridge *bridge, drm_connector_attach_encoder(&panel_bridge->connector, bridge->encoder); - if (connector->funcs->reset) - connector->funcs->reset(connector); + if (bridge->dev->registered) { + if (connector->funcs->reset) + connector->funcs->reset(connector); + drm_connector_register(connector); + } return 0; } From 8c1bfd0ccf904d80d2874062ef3fecdfddb2a48b Mon Sep 17 00:00:00 2001 From: Sandor Yu Date: Fri, 15 Apr 2022 10:42:47 +0800 Subject: [PATCH 095/219] drm: bridge: dw_hdmi: default enable workaround to clear the overflow i.MX8MPlus (v2.13a) has verified need the workaround to clear the overflow with one iteration. Only i.MX6Q(v1.30a) need the workaround with 4 iterations, the others versions later than v1.3a have been identified as needing the workaround with a single iteration. Default enable the workaround with one iteration for all versions later than v1.30a. Signed-off-by: Sandor Yu Acked-by: Neil Armstrong Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/561951005a85574dcdd108e5d6a3a87df930ea3d.1649989179.git.Sandor.yu@nxp.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index a563460f8d20..c11782fc1501 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2086,30 +2086,21 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) * then write one of the FC registers several times. * * The number of iterations matters and depends on the HDMI TX revision - * (and possibly on the platform). So far i.MX6Q (v1.30a), i.MX6DL - * (v1.31a) and multiple Allwinner SoCs (v1.32a) have been identified - * as needing the workaround, with 4 iterations for v1.30a and 1 - * iteration for others. - * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing - * the workaround with a single iteration. - * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have - * been identified as needing the workaround with a single iteration. + * (and possibly on the platform). + * 4 iterations for i.MX6Q(v1.30a) and 1 iteration for others. + * i.MX6DL (v1.31a), Allwinner SoCs (v1.32a), Rockchip RK3288 SoC (v2.00a), + * Amlogic Meson GX SoCs (v2.01a), RK3328/RK3399 SoCs (v2.11a) + * and i.MX8MPlus (v2.13a) have been identified as needing the workaround + * with a single iteration. 
*/ switch (hdmi->version) { case 0x130a: count = 4; break; - case 0x131a: - case 0x132a: - case 0x200a: - case 0x201a: - case 0x211a: - case 0x212a: + default: count = 1; break; - default: - return; } /* TMDS software reset */ From a90b8fc9ca2d5cae915a0a185785325095245ec3 Mon Sep 17 00:00:00 2001 From: Sandor Yu Date: Fri, 15 Apr 2022 10:42:48 +0800 Subject: [PATCH 096/219] drm: bridge: dw_hdmi: Enable GCP only for Deep Color HDMI1.4b specification section 6.5.3: Source shall only send GCPs with non-zero CD to sinks that indicate support for Deep Color. DW HDMI GCP default enabled, but only transmit CD and do not handle AVMUTE, PP norDefault_Phase (yet). Disable Auto GCP when 24-bit color for sinks that not support Deep Color. Signed-off-by: Sandor Yu Reviewed-by: Neil Armstrong Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/78fa41e4fb3d3d53354034bc221fcf870dbac617.1649989179.git.Sandor.yu@nxp.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 17 +++++++++++++++++ drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 3 +++ 2 files changed, 20 insertions(+) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index c11782fc1501..81f4c39fab85 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1108,6 +1108,8 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi) unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP; struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; u8 val, vp_conf; + u8 clear_gcp_auto = 0; + if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) || hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format) || @@ -1117,6 +1119,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi) case 8: color_depth = 4; output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; + clear_gcp_auto = 1; break; case 10: color_depth = 5; @@ -1136,6 +1139,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi) case 0: case 8: remap_size = HDMI_VP_REMAP_YCC422_16bit; + clear_gcp_auto = 1; break; case 10: remap_size = HDMI_VP_REMAP_YCC422_20bit; @@ -1160,6 +1164,19 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi) HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK); hdmi_writeb(hdmi, val, HDMI_VP_PR_CD); + /* HDMI1.4b specification section 6.5.3: + * Source shall only send GCPs with non-zero CD to sinks + * that indicate support for Deep Color. + * GCP only transmit CD and do not handle AVMUTE, PP norDefault_Phase (yet). + * Disable Auto GCP when 24-bit color for sinks that not support Deep Color. 
+ */ + val = hdmi_readb(hdmi, HDMI_FC_DATAUTO3); + if (clear_gcp_auto == 1) + val &= ~HDMI_FC_DATAUTO3_GCP_AUTO; + else + val |= HDMI_FC_DATAUTO3_GCP_AUTO; + hdmi_writeb(hdmi, val, HDMI_FC_DATAUTO3); + hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE, HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h index 1999db05bc3b..18df3e119553 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h @@ -850,6 +850,9 @@ enum { HDMI_FC_DATAUTO0_VSD_MASK = 0x08, HDMI_FC_DATAUTO0_VSD_OFFSET = 3, +/* FC_DATAUTO3 field values */ + HDMI_FC_DATAUTO3_GCP_AUTO = 0x04, + /* PHY_CONF0 field values */ HDMI_PHY_CONF0_PDZ_MASK = 0x80, HDMI_PHY_CONF0_PDZ_OFFSET = 7, From 8fb241e2d265de7c1711635f3f2048f33e02b57d Mon Sep 17 00:00:00 2001 From: Sandor Yu Date: Fri, 15 Apr 2022 10:42:49 +0800 Subject: [PATCH 097/219] drm: bridge: dw_hdmi: add reset function for PHY GEN1 PHY reset register(MC_PHYRSTZ) active high reset control for PHY GEN2, and active low reset control for PHY GEN1. Rename function dw_hdmi_phy_reset to dw_hdmi_phy_gen2_reset. Add dw_hdmi_phy_gen1_reset function for PHY GEN1. Signed-off-by: Sandor Yu Reviewed-by: Neil Armstrong Acked-by: Jernej Skrabec Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/e0b3be2d63fe3e95246fb8b8b0dcd57415b29e04.1649989179.git.Sandor.yu@nxp.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 14 +++++++++++--- drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 2 +- include/drm/bridge/dw_hdmi.h | 4 +++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 81f4c39fab85..3500b4b98297 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1374,13 +1374,21 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable) HDMI_PHY_CONF0_SELDIPIF_MASK); } -void dw_hdmi_phy_reset(struct dw_hdmi *hdmi) +void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi) +{ + /* PHY reset. The reset signal is active low on Gen1 PHYs. */ + hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); + hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); +} +EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen1_reset); + +void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi) { /* PHY reset. The reset signal is active high on Gen2 PHYs. 
*/ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); } -EXPORT_SYMBOL_GPL(dw_hdmi_phy_reset); +EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_reset); void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address) { @@ -1534,7 +1542,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, if (phy->has_svsret) dw_hdmi_phy_enable_svsret(hdmi, 1); - dw_hdmi_phy_reset(hdmi); + dw_hdmi_phy_gen2_reset(hdmi); hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST); diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 5e2b0175df36..2860e6bff8b7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -135,7 +135,7 @@ static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi, dw_hdmi_phy_gen2_txpwron(hdmi, 0); dw_hdmi_phy_gen2_pddq(hdmi, 1); - dw_hdmi_phy_reset(hdmi); + dw_hdmi_phy_gen2_reset(hdmi); dw_hdmi_phy_gen2_pddq(hdmi, 0); diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 2a1f85f9a8a3..70082f80a8c8 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -187,9 +187,11 @@ void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address); void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, unsigned char addr); +void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi); + void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable); void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable); -void dw_hdmi_phy_reset(struct dw_hdmi *hdmi); +void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi); enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi, void *data); From d970ce303ff80ae57bbd3e784f2772dbf3056e0c Mon Sep 17 00:00:00 2001 From: Sandor Yu Date: Fri, 15 Apr 2022 10:42:50 +0800 Subject: [PATCH 098/219] drm: bridge: dw_hdmi: Audio: Add General Parallel Audio (GPA) driver General Parallel Audio (GPA) interface is one of the supported audio interface for synopsys HDMI module, which has verified for i.MX8MPlus platform. This is initial version for GPA. Signed-off-by: Shengjiu Wang Signed-off-by: Sandor Yu Reviewed-by: Neil Armstrong Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/f21ba3e8c4d9d028ac74c6f3c588ddbffe739399.1649989179.git.Sandor.yu@nxp.com --- drivers/gpu/drm/bridge/synopsys/Kconfig | 10 + drivers/gpu/drm/bridge/synopsys/Makefile | 1 + .../drm/bridge/synopsys/dw-hdmi-gp-audio.c | 199 ++++++++++++++++++ drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 132 +++++++++++- drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 13 +- include/drm/bridge/dw_hdmi.h | 7 + 6 files changed, 358 insertions(+), 4 deletions(-) create mode 100644 drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 21a1be3ced0f..a4a31b669b65 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -25,6 +25,16 @@ config DRM_DW_HDMI_I2S_AUDIO Support the I2S Audio interface which is part of the Synopsys Designware HDMI block. +config DRM_DW_HDMI_GP_AUDIO + tristate "Synopsys Designware GP Audio interface" + depends on DRM_DW_HDMI && SND + select SND_PCM + select SND_PCM_ELD + select SND_PCM_IEC958 + help + Support the GP Audio interface which is part of the Synopsys + Designware HDMI block. 
+ config DRM_DW_HDMI_CEC tristate "Synopsis Designware CEC interface" depends on DRM_DW_HDMI diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile index 91d746ad5de1..ce715562e9e5 100644 --- a/drivers/gpu/drm/bridge/synopsys/Makefile +++ b/drivers/gpu/drm/bridge/synopsys/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o +obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c new file mode 100644 index 000000000000..11ea1c84eb35 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * dw-hdmi-gp-audio.c + * + * Copyright 2020-2022 NXP + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw-hdmi-audio.h" + +#define DRIVER_NAME "dw-hdmi-gp-audio" +#define DRV_NAME "hdmi-gp-audio" + +struct snd_dw_hdmi { + struct dw_hdmi_audio_data data; + struct platform_device *audio_pdev; + unsigned int pos; +}; + +struct dw_hdmi_channel_conf { + u8 conf1; + u8 ca; +}; + +/* + * The default mapping of ALSA channels to HDMI channels and speaker + * allocation bits. Note that we can't do channel remapping here - + * channels must be in the same order. + * + * Mappings for alsa-lib pcm/surround*.conf files: + * + * Front Sur4.0 Sur4.1 Sur5.0 Sur5.1 Sur7.1 + * Channels 2 4 6 6 6 8 + * + * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel: + * + * Number of ALSA channels + * ALSA Channel 2 3 4 5 6 7 8 + * 0 FL:0 = = = = = = + * 1 FR:1 = = = = = = + * 2 FC:3 RL:4 LFE:2 = = = + * 3 RR:5 RL:4 FC:3 = = + * 4 RR:5 RL:4 = = + * 5 RR:5 = = + * 6 RC:6 = + * 7 RLC/FRC RLC/FRC + */ +static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = { + { 0x03, 0x00 }, /* FL,FR */ + { 0x0b, 0x02 }, /* FL,FR,FC */ + { 0x33, 0x08 }, /* FL,FR,RL,RR */ + { 0x37, 0x09 }, /* FL,FR,LFE,RL,RR */ + { 0x3f, 0x0b }, /* FL,FR,LFE,FC,RL,RR */ + { 0x7f, 0x0f }, /* FL,FR,LFE,FC,RL,RR,RC */ + { 0xff, 0x13 }, /* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */ +}; + +static int audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct snd_dw_hdmi *dw = dev_get_drvdata(dev); + int ret = 0; + u8 ca; + + dw_hdmi_set_sample_rate(dw->data.hdmi, params->sample_rate); + + ca = default_hdmi_channel_config[params->channels - 2].ca; + + dw_hdmi_set_channel_count(dw->data.hdmi, params->channels); + dw_hdmi_set_channel_allocation(dw->data.hdmi, ca); + + dw_hdmi_set_sample_non_pcm(dw->data.hdmi, + params->iec.status[0] & IEC958_AES0_NONAUDIO); + dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width); + + return ret; +} + +static void audio_shutdown(struct device *dev, void *data) +{ +} + +static int audio_mute_stream(struct device *dev, void *data, + bool enable, int direction) +{ + struct snd_dw_hdmi *dw = dev_get_drvdata(dev); + int ret = 0; + + if (!enable) + dw_hdmi_audio_enable(dw->data.hdmi); + else + dw_hdmi_audio_disable(dw->data.hdmi); + + return ret; +} + +static int audio_get_eld(struct device *dev, void *data, + u8 *buf, size_t len) +{ + struct 
dw_hdmi_audio_data *audio = data; + u8 *eld; + + eld = audio->get_eld(audio->hdmi); + if (eld) + memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len)); + else + /* Pass en empty ELD if connector not available */ + memset(buf, 0, len); + + return 0; +} + +static int audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct snd_dw_hdmi *dw = dev_get_drvdata(dev); + + return dw_hdmi_set_plugged_cb(dw->data.hdmi, fn, codec_dev); +} + +static const struct hdmi_codec_ops audio_codec_ops = { + .hw_params = audio_hw_params, + .audio_shutdown = audio_shutdown, + .mute_stream = audio_mute_stream, + .get_eld = audio_get_eld, + .hook_plugged_cb = audio_hook_plugged_cb, +}; + +static int snd_dw_hdmi_probe(struct platform_device *pdev) +{ + struct dw_hdmi_audio_data *data = pdev->dev.platform_data; + struct snd_dw_hdmi *dw; + + const struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &audio_codec_ops, + .max_i2s_channels = 8, + .data = data, + }; + + dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL); + if (!dw) + return -ENOMEM; + + dw->data = *data; + + platform_set_drvdata(pdev, dw); + + dw->audio_pdev = platform_device_register_data(&pdev->dev, + HDMI_CODEC_DRV_NAME, 1, + &codec_data, + sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(dw->audio_pdev); +} + +static int snd_dw_hdmi_remove(struct platform_device *pdev) +{ + struct snd_dw_hdmi *dw = platform_get_drvdata(pdev); + + platform_device_unregister(dw->audio_pdev); + + return 0; +} + +static struct platform_driver snd_dw_hdmi_driver = { + .probe = snd_dw_hdmi_probe, + .remove = snd_dw_hdmi_remove, + .driver = { + .name = DRIVER_NAME, + }, +}; + +module_platform_driver(snd_dw_hdmi_driver); + +MODULE_AUTHOR("Shengjiu Wang "); +MODULE_DESCRIPTION("Synopsys Designware HDMI GPA ALSA interface"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 3500b4b98297..f3f82a04c252 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -191,7 +191,10 @@ struct dw_hdmi { spinlock_t audio_lock; struct mutex audio_mutex; + unsigned int sample_non_pcm; + unsigned int sample_width; unsigned int sample_rate; + unsigned int channels; unsigned int audio_cts; unsigned int audio_n; bool audio_enable; @@ -589,6 +592,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk) n = 4096; else if (pixel_clk == 74176000 || pixel_clk == 148352000) n = 11648; + else if (pixel_clk == 297000000) + n = 3072; else n = 4096; n *= mult; @@ -601,6 +606,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk) n = 17836; else if (pixel_clk == 148352000) n = 8918; + else if (pixel_clk == 297000000) + n = 4704; else n = 6272; n *= mult; @@ -615,6 +622,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk) n = 11648; else if (pixel_clk == 148352000) n = 5824; + else if (pixel_clk == 297000000) + n = 5120; else n = 6144; n *= mult; @@ -659,8 +668,8 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID); - /* Only compute CTS when using internal AHB audio */ - if (config3 & HDMI_CONFIG3_AHBAUDDMA) { + /* Compute CTS when using internal AHB audio or General Parallel audio*/ + if ((config3 & HDMI_CONFIG3_AHBAUDDMA) || (config3 & HDMI_CONFIG3_GPAUD)) { /* * Compute the CTS value from the N value. 
Note that CTS and N * can be up to 20 bits in total, so we need 64-bit math. Also @@ -702,6 +711,22 @@ static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi) mutex_unlock(&hdmi->audio_mutex); } +void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width) +{ + mutex_lock(&hdmi->audio_mutex); + hdmi->sample_width = width; + mutex_unlock(&hdmi->audio_mutex); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_width); + +void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm) +{ + mutex_lock(&hdmi->audio_mutex); + hdmi->sample_non_pcm = non_pcm; + mutex_unlock(&hdmi->audio_mutex); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm); + void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate) { mutex_lock(&hdmi->audio_mutex); @@ -717,6 +742,7 @@ void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt) u8 layout; mutex_lock(&hdmi->audio_mutex); + hdmi->channels = cnt; /* * For >2 channel PCM audio, we need to select layout 1 @@ -765,6 +791,89 @@ static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi) return hdmi->curr_conn->eld; } +static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi) +{ + const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; + int sample_freq = 0x2, org_sample_freq = 0xD; + int ch_mask = BIT(hdmi->channels) - 1; + + switch (hdmi->sample_rate) { + case 32000: + sample_freq = 0x03; + org_sample_freq = 0x0C; + break; + case 44100: + sample_freq = 0x00; + org_sample_freq = 0x0F; + break; + case 48000: + sample_freq = 0x02; + org_sample_freq = 0x0D; + break; + case 88200: + sample_freq = 0x08; + org_sample_freq = 0x07; + break; + case 96000: + sample_freq = 0x0A; + org_sample_freq = 0x05; + break; + case 176400: + sample_freq = 0x0C; + org_sample_freq = 0x03; + break; + case 192000: + sample_freq = 0x0E; + org_sample_freq = 0x01; + break; + default: + break; + } + + hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); + hdmi_enable_audio_clk(hdmi, true); + + hdmi_writeb(hdmi, 0x1, HDMI_FC_AUDSCHNLS0); + hdmi_writeb(hdmi, hdmi->channels, HDMI_FC_AUDSCHNLS2); + hdmi_writeb(hdmi, 0x22, HDMI_FC_AUDSCHNLS3); + hdmi_writeb(hdmi, 0x22, HDMI_FC_AUDSCHNLS4); + hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS5); + hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS6); + hdmi_writeb(hdmi, (0x3 << 4) | sample_freq, HDMI_FC_AUDSCHNLS7); + hdmi_writeb(hdmi, (org_sample_freq << 4) | 0xb, HDMI_FC_AUDSCHNLS8); + + hdmi_writeb(hdmi, ch_mask, HDMI_GP_CONF1); + hdmi_writeb(hdmi, 0x02, HDMI_GP_CONF2); + hdmi_writeb(hdmi, 0x01, HDMI_GP_CONF0); + + hdmi_modb(hdmi, 0x3, 0x3, HDMI_FC_DATAUTO3); + + /* hbr */ + if (hdmi->sample_rate == 192000 && hdmi->channels == 8 && + hdmi->sample_width == 32 && hdmi->sample_non_pcm) + hdmi_modb(hdmi, 0x01, 0x01, HDMI_GP_CONF2); + + if (pdata->enable_audio) + pdata->enable_audio(hdmi, + hdmi->channels, + hdmi->sample_width, + hdmi->sample_rate, + hdmi->sample_non_pcm); +} + +static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi) +{ + const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; + + hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0); + + hdmi_modb(hdmi, 0, 0x3, HDMI_FC_DATAUTO3); + if (pdata->disable_audio) + pdata->disable_audio(hdmi); + + hdmi_enable_audio_clk(hdmi, false); +} + static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); @@ -3258,6 +3367,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->plat_data = plat_data; hdmi->dev = dev; hdmi->sample_rate = 48000; + hdmi->channels = 2; hdmi->disabled = true; hdmi->rxsense = true; 
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE); @@ -3481,6 +3591,24 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, pdevinfo.size_data = sizeof(audio); pdevinfo.dma_mask = DMA_BIT_MASK(32); hdmi->audio = platform_device_register_full(&pdevinfo); + } else if (iores && config3 & HDMI_CONFIG3_GPAUD) { + struct dw_hdmi_audio_data audio; + + audio.phys = iores->start; + audio.base = hdmi->regs; + audio.irq = irq; + audio.hdmi = hdmi; + audio.get_eld = hdmi_audio_get_eld; + + hdmi->enable_audio = dw_hdmi_gp_audio_enable; + hdmi->disable_audio = dw_hdmi_gp_audio_disable; + + pdevinfo.name = "dw-hdmi-gp-audio"; + pdevinfo.id = PLATFORM_DEVID_NONE; + pdevinfo.data = &audio; + pdevinfo.size_data = sizeof(audio); + pdevinfo.dma_mask = DMA_BIT_MASK(32); + hdmi->audio = platform_device_register_full(&pdevinfo); } if (!plat_data->disable_cec && (config0 & HDMI_CONFIG0_CEC)) { diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h index 18df3e119553..af43a0414b78 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h @@ -158,8 +158,17 @@ #define HDMI_FC_SPDDEVICEINF 0x1062 #define HDMI_FC_AUDSCONF 0x1063 #define HDMI_FC_AUDSSTAT 0x1064 -#define HDMI_FC_AUDSCHNLS7 0x106e -#define HDMI_FC_AUDSCHNLS8 0x106f +#define HDMI_FC_AUDSV 0x1065 +#define HDMI_FC_AUDSU 0x1066 +#define HDMI_FC_AUDSCHNLS0 0x1067 +#define HDMI_FC_AUDSCHNLS1 0x1068 +#define HDMI_FC_AUDSCHNLS2 0x1069 +#define HDMI_FC_AUDSCHNLS3 0x106A +#define HDMI_FC_AUDSCHNLS4 0x106B +#define HDMI_FC_AUDSCHNLS5 0x106C +#define HDMI_FC_AUDSCHNLS6 0x106D +#define HDMI_FC_AUDSCHNLS7 0x106E +#define HDMI_FC_AUDSCHNLS8 0x106F #define HDMI_FC_DATACH0FILL 0x1070 #define HDMI_FC_DATACH1FILL 0x1071 #define HDMI_FC_DATACH2FILL 0x1072 diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 70082f80a8c8..f668e75fbabe 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -143,6 +143,11 @@ struct dw_hdmi_plat_data { const struct drm_display_info *info, const struct drm_display_mode *mode); + /* Platform-specific audio enable/disable (optional) */ + void (*enable_audio)(struct dw_hdmi *hdmi, int channel, + int width, int rate, int non_pcm); + void (*disable_audio)(struct dw_hdmi *hdmi); + /* Vendor PHY support */ const struct dw_hdmi_phy_ops *phy_ops; const char *phy_name; @@ -173,6 +178,8 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, struct device *codec_dev); +void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm); +void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status); From fb8da7f3111ab500606960bef1bb32450c664750 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=ADcolas=20F=2E=20R=2E=20A=2E=20Prado?= Date: Thu, 7 Apr 2022 21:30:34 -0400 Subject: [PATCH 099/219] drm/bridge: anx7625: Use uint8 for lane-swing arrays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As defined in the anx7625 dt-binding, the analogix,lane0-swing and analogix,lane1-swing properties are uint8 arrays. Yet, the driver was reading the array as if it were of uint32 and masking to 8-bit before writing to the registers. 
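For illustration, the read that matches the binding looks like this (hedged sketch; the wrapper function is invented, while the property name and u8 element width come from the dt-binding):

#include <linux/of.h>

static int read_lane_swing(struct device *dev, u8 *vals, int cnt)
{
	/*
	 * The binding declares a uint8 array, so the u8 helper must be used;
	 * of_property_read_u32_array() expects 4 bytes per element and would
	 * misparse a /bits/ 8 property written according to the binding.
	 */
	return of_property_read_u8_array(dev->of_node, "analogix,lane0-swing",
					 vals, cnt);
}
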
This means that a devicetree written in accordance to the dt-binding would have its values incorrectly parsed. Fix the issue by reading the array as uint8 and storing them as uint8 internally, so that we can also drop the masking when writing the registers. Fixes: fd0310b6fe7d ("drm/bridge: anx7625: add MIPI DPI input feature") Signed-off-by: Nícolas F. R. A. Prado Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220408013034.673418-1-nfraprado@collabora.com --- drivers/gpu/drm/bridge/analogix/anx7625.c | 12 ++++++------ drivers/gpu/drm/bridge/analogix/anx7625.h | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index f2bc30c98c77..376da01243a3 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1486,12 +1486,12 @@ static void anx7625_dp_adjust_swing(struct anx7625_data *ctx) for (i = 0; i < ctx->pdata.dp_lane0_swing_reg_cnt; i++) anx7625_reg_write(ctx, ctx->i2c.tx_p1_client, DP_TX_LANE0_SWING_REG0 + i, - ctx->pdata.lane0_reg_data[i] & 0xFF); + ctx->pdata.lane0_reg_data[i]); for (i = 0; i < ctx->pdata.dp_lane1_swing_reg_cnt; i++) anx7625_reg_write(ctx, ctx->i2c.tx_p1_client, DP_TX_LANE1_SWING_REG0 + i, - ctx->pdata.lane1_reg_data[i] & 0xFF); + ctx->pdata.lane1_reg_data[i]); } static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) @@ -1598,8 +1598,8 @@ static int anx7625_get_swing_setting(struct device *dev, num_regs = DP_TX_SWING_REG_CNT; pdata->dp_lane0_swing_reg_cnt = num_regs; - of_property_read_u32_array(dev->of_node, "analogix,lane0-swing", - pdata->lane0_reg_data, num_regs); + of_property_read_u8_array(dev->of_node, "analogix,lane0-swing", + pdata->lane0_reg_data, num_regs); } if (of_get_property(dev->of_node, @@ -1608,8 +1608,8 @@ static int anx7625_get_swing_setting(struct device *dev, num_regs = DP_TX_SWING_REG_CNT; pdata->dp_lane1_swing_reg_cnt = num_regs; - of_property_read_u32_array(dev->of_node, "analogix,lane1-swing", - pdata->lane1_reg_data, num_regs); + of_property_read_u8_array(dev->of_node, "analogix,lane1-swing", + pdata->lane1_reg_data, num_regs); } return 0; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index edbbfe410a56..e257a84db962 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -426,9 +426,9 @@ struct anx7625_platform_data { int mipi_lanes; int audio_en; int dp_lane0_swing_reg_cnt; - int lane0_reg_data[DP_TX_SWING_REG_CNT]; + u8 lane0_reg_data[DP_TX_SWING_REG_CNT]; int dp_lane1_swing_reg_cnt; - int lane1_reg_data[DP_TX_SWING_REG_CNT]; + u8 lane1_reg_data[DP_TX_SWING_REG_CNT]; u32 low_power_mode; struct device_node *mipi_host_node; }; From 29d699a4c006940cf06f647ed02cc083234e8e33 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 7 Apr 2022 20:56:16 +0200 Subject: [PATCH 100/219] dt-bindings: display: bridge: icn6211: Document DSI data-lanes property It is necessary to specify the number of connected/used DSI data lanes when using the DSI input port of this bridge. Document the 'data-lanes' property of the DSI input port. 
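As context for how a consumer driver uses this property, a minimal sketch (the function name is illustrative; the endpoint lookup assumes the DSI input on port 0, and the follow-up driver patch below implements essentially this logic):

#include <linux/of.h>
#include <linux/of_graph.h>

static int icn6211_count_dsi_lanes(struct device *dev)
{
	struct device_node *ep;
	int lanes;

	ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
	lanes = of_property_count_u32_elems(ep, "data-lanes");
	of_node_put(ep);

	/* Fall back to the historical default of 4 lanes if unspecified. */
	if (lanes < 1 || lanes > 4)
		lanes = 4;

	return lanes;
}
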
Signed-off-by: Marek Vasut Cc: Jagan Teki Cc: Laurent Pinchart Cc: Maxime Ripard Cc: Rob Herring Cc: Robert Foss Cc: Sam Ravnborg Cc: Thomas Zimmermann Cc: devicetree@vger.kernel.org To: dri-devel@lists.freedesktop.org Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220407185617.179573-1-marex@denx.de --- .../display/bridge/chipone,icn6211.yaml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml b/Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml index 7257fd0ae4da..4f0b7c71313c 100644 --- a/Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml +++ b/Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml @@ -41,10 +41,26 @@ properties: properties: port@0: - $ref: /schemas/graph.yaml#/properties/port + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false description: Video port for MIPI DSI input + properties: + endpoint: + $ref: /schemas/media/video-interfaces.yaml# + unevaluatedProperties: false + + properties: + data-lanes: + description: array of physical DSI data lane indexes. + minItems: 1 + items: + - const: 1 + - const: 2 + - const: 3 + - const: 4 + port@1: $ref: /schemas/graph.yaml#/properties/port description: From 4ab85930b7183eaabdaffbcecd89c12e2aca071a Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 7 Apr 2022 20:56:17 +0200 Subject: [PATCH 101/219] drm: bridge: icn6211: Add DSI lane count DT property parsing The driver currently hard-codes DSI lane count to two, however the chip is capable of operating in 1..4 DSI lanes mode. Parse 'data-lanes' DT property and program the result into DSI_CTRL register. Signed-off-by: Marek Vasut Cc: Jagan Teki Cc: Laurent Pinchart Cc: Maxime Ripard Cc: Robert Foss Cc: Sam Ravnborg Cc: Thomas Zimmermann To: dri-devel@lists.freedesktop.org Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220407185617.179573-2-marex@denx.de --- drivers/gpu/drm/bridge/chipone-icn6211.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index 6ce0633d4c08..47dea657a752 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -395,6 +395,11 @@ static void chipone_atomic_enable(struct drm_bridge *bridge, /* dsi specific sequence */ chipone_writeb(icn, SYNC_EVENT_DLY, 0x80); chipone_writeb(icn, HFP_MIN, hfp & 0xff); + + /* DSI data lane count */ + chipone_writeb(icn, DSI_CTRL, + DSI_CTRL_UNKNOWN | DSI_CTRL_DSI_LANES(icn->dsi->lanes - 1)); + chipone_writeb(icn, MIPI_PD_CK_LANE, 0xa0); chipone_writeb(icn, PLL_CTRL(12), 0xff); chipone_writeb(icn, MIPI_PN_SWAP, 0x00); @@ -480,9 +485,23 @@ static void chipone_mode_set(struct drm_bridge *bridge, static int chipone_dsi_attach(struct chipone *icn) { struct mipi_dsi_device *dsi = icn->dsi; - int ret; + struct device *dev = icn->dev; + struct device_node *endpoint; + int dsi_lanes, ret; + + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); + dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes"); + of_node_put(endpoint); + + /* + * If the 'data-lanes' property does not exist in DT or is invalid, + * default to previously hard-coded behavior, which was 4 data lanes. 
+ */ + if (dsi_lanes >= 1 && dsi_lanes <= 4) + icn->dsi->lanes = dsi_lanes; + else + icn->dsi->lanes = 4; - dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; From 72c3c8d6e5275b19fd2d32ec787e8135a421c7ec Mon Sep 17 00:00:00 2001 From: Matt Atwood Date: Mon, 18 Apr 2022 11:51:57 +0530 Subject: [PATCH 102/219] drm/i915/rpl-p: Add PCI IDs Adding initial PCI ids for RPL-P. RPL-P behaves identically to ADL-P from i915's point of view. Changes since V1 : - SUBPLATFORM ADL_N and RPL_P clash as both are ADLP based - Matthew R Bspec: 55376 Signed-off-by: Matt Atwood Signed-off-by: Madhumitha Tolakanahalli Pradeep Signed-off-by: Tejas Upadhyay [mattrope: Corrected comment formatting to match coding style] Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20220418062157.2974665-1-tejaskumarx.surendrakumar.upadhyay@intel.com --- arch/x86/kernel/early-quirks.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 4 +++- drivers/gpu/drm/i915/i915_pci.c | 1 + drivers/gpu/drm/i915/intel_device_info.c | 9 +++++---- drivers/gpu/drm/i915/intel_device_info.h | 11 ++++++++--- include/drm/i915_pciids.h | 9 +++++++++ 6 files changed, 27 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 805596736e20..a6c1867fc7aa 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -558,6 +558,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_ADLP_IDS(&gen11_early_ops), INTEL_ADLN_IDS(&gen11_early_ops), INTEL_RPLS_IDS(&gen11_early_ops), + INTEL_RPLP_IDS(&gen11_early_ops), }; struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ce2cd6491d6d..3711d618a372 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1054,9 +1054,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_DG2_G12(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12) #define IS_ADLS_RPLS(dev_priv) \ - IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL) #define IS_ADLP_N(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) +#define IS_ADLP_RPLP(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 9e077929ed67..f59e526b03fc 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1151,6 +1151,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_ADLN_IDS(&adl_p_info), INTEL_DG1_IDS(&dg1_info), INTEL_RPLS_IDS(&adl_s_info), + INTEL_RPLP_IDS(&adl_p_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 41a5b98d1342..74c3ffb66b8d 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -181,8 +181,9 @@ static const u16 subplatform_n_ids[] = { INTEL_ADLN_IDS(0), }; -static const u16 subplatform_rpls_ids[] = { +static const u16 subplatform_rpl_ids[] = { INTEL_RPLS_IDS(0), + INTEL_RPLP_IDS(0), }; static bool 
find_devid(u16 id, const u16 *p, unsigned int num) @@ -227,9 +228,9 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_n_ids, ARRAY_SIZE(subplatform_n_ids))) { mask = BIT(INTEL_SUBPLATFORM_N); - } else if (find_devid(devid, subplatform_rpls_ids, - ARRAY_SIZE(subplatform_rpls_ids))) { - mask = BIT(INTEL_SUBPLATFORM_RPL_S); + } else if (find_devid(devid, subplatform_rpl_ids, + ARRAY_SIZE(subplatform_rpl_ids))) { + mask = BIT(INTEL_SUBPLATFORM_RPL); } GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index f9b955810593..4053efaa55da 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -114,11 +114,16 @@ enum intel_platform { #define INTEL_SUBPLATFORM_G11 1 #define INTEL_SUBPLATFORM_G12 2 -/* ADL-S */ -#define INTEL_SUBPLATFORM_RPL_S 0 +/* ADL */ +#define INTEL_SUBPLATFORM_RPL 0 /* ADL-P */ -#define INTEL_SUBPLATFORM_N 0 +/* + * As #define INTEL_SUBPLATFORM_RPL 0 will apply + * here too, SUBPLATFORM_N will have different + * bit set + */ +#define INTEL_SUBPLATFORM_N 1 enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 0800dc8b98b3..a7b5eea7ffaa 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -683,4 +683,13 @@ INTEL_VGA_DEVICE(0xA78A, info), \ INTEL_VGA_DEVICE(0xA78B, info) +/* RPL-P */ +#define INTEL_RPLP_IDS(info) \ + INTEL_VGA_DEVICE(0xA720, info), \ + INTEL_VGA_DEVICE(0xA721, info), \ + INTEL_VGA_DEVICE(0xA7A0, info), \ + INTEL_VGA_DEVICE(0xA7A1, info), \ + INTEL_VGA_DEVICE(0xA7A8, info), \ + INTEL_VGA_DEVICE(0xA7A9, info) + #endif /* _I915_PCIIDS_H */ From ac2f033aa4fbc94a512e703a953ed36e1bb45d0a Mon Sep 17 00:00:00 2001 From: Xiaomeng Tong Date: Wed, 13 Apr 2022 13:11:05 +0800 Subject: [PATCH 103/219] drm/gma500: fix a potential repeat execution in psb_driver_load Instead of exiting the loop as expected when an entry is found, the list_for_each_entry() continues until the traversal is complete. To avoid potential executing 'ret = gma_backlight_init(dev);' repeatly, goto outside the loop when found entry by replacing switch/case with if statement. Signed-off-by: Xiaomeng Tong Signed-off-by: Patrik Jakobsson [Fixed indentation] Link: https://patchwork.freedesktop.org/patch/msgid/20220413051105.5612-1-xiam0nd.tong@gmail.com --- drivers/gpu/drm/gma500/psb_drv.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 2aff54d505e2..1d8744f3e702 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -396,9 +396,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) drm_for_each_connector_iter(connector, &conn_iter) { gma_encoder = gma_attached_encoder(connector); - switch (gma_encoder->type) { - case INTEL_OUTPUT_LVDS: - case INTEL_OUTPUT_MIPI: + if (gma_encoder->type == INTEL_OUTPUT_LVDS || + gma_encoder->type == INTEL_OUTPUT_MIPI) { ret = gma_backlight_init(dev); break; } From 52b1b46c39ae4321459a1a28dd4e596497b375b8 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 19 Apr 2022 12:04:04 +0200 Subject: [PATCH 104/219] of: Create platform devices for OF framebuffers Create a platform device for each OF-declared framebuffer and have offb bind to these devices. Allows for real hot-unplugging and other drivers besides offb. 
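To make the intent concrete, a rough sketch of the driver-side binding this enables (the probe wrapper and driver struct are illustrative and assume they live in offb.c; offb_init_nodriver() and the "of-display" device name come from the patch below):

#include <linux/platform_device.h>

static int of_display_probe(struct platform_device *pdev)
{
	/* An fbdev or DRM driver now has a regular device to bind against. */
	offb_init_nodriver(pdev, pdev->dev.of_node, 0);
	return 0;
}

static struct platform_driver of_display_driver = {
	.driver = {
		.name = "of-display",
	},
	.probe = of_display_probe,
};
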
Originally, offb created framebuffer devices while initializing its module by parsing the OF device tree. No actual Linux device was set up. This tied OF framebuffers to offb and makes writing other drivers for the OF framebuffers complicated. The absence of a Linux device further prevented real hot-unplugging. Adding a distinct platform device for each OF framebuffer solves both problems. Specifically, a DRM driver can now provide graphics output for modern userspace. Some of the offb init code is now located in the OF initialization. There's now also an implementation of of_platform_default_populate_init(), which was missing before. The OF side creates different devices for either OF display nodes or BootX displays as they require different handling by the driver. The offb drivers picks up each type of device and runs the appropriate fbdev initialization. Tested with OF display nodes on qemu's ppc64le target. v3: * declare variable 'node' with function scope (Rob) v2: * run PPC code as part of existing initialization (Rob) * add a few more error warnings (Javier) Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Reviewed-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20220419100405.12600-2-tzimmermann@suse.de --- drivers/of/platform.c | 83 +++++++++++++++++++++++------- drivers/video/fbdev/offb.c | 102 ++++++++++++++++++++++++------------- 2 files changed, 131 insertions(+), 54 deletions(-) diff --git a/drivers/of/platform.c b/drivers/of/platform.c index a16b74f32aa9..176ed71d20c0 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -507,7 +507,6 @@ int of_platform_default_populate(struct device_node *root, } EXPORT_SYMBOL_GPL(of_platform_default_populate); -#ifndef CONFIG_PPC static const struct of_device_id reserved_mem_matches[] = { { .compatible = "qcom,rmtfs-mem" }, { .compatible = "qcom,cmd-db" }, @@ -527,27 +526,74 @@ static int __init of_platform_default_populate_init(void) if (!of_have_populated_dt()) return -ENODEV; - /* - * Handle certain compatibles explicitly, since we don't want to create - * platform_devices for every node in /reserved-memory with a - * "compatible", - */ - for_each_matching_node(node, reserved_mem_matches) + if (IS_ENABLED(CONFIG_PPC)) { + struct device_node *boot_display = NULL; + struct platform_device *dev; + int ret; + + /* Check if we have a MacOS display without a node spec */ + if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL)) { + /* + * The old code tried to work out which node was the MacOS + * display based on the address. I'm dropping that since the + * lack of a node spec only happens with old BootX versions + * (users can update) and with this code, they'll still get + * a display (just not the palette hacks). + */ + dev = platform_device_alloc("bootx-noscreen", 0); + if (WARN_ON(!dev)) + return -ENOMEM; + ret = platform_device_add(dev); + if (WARN_ON(ret)) { + platform_device_put(dev); + return ret; + } + } + + /* + * For OF framebuffers, first create the device for the boot display, + * then for the other framebuffers. Only fail for the boot display; + * ignore errors for the rest. 
+ */ + for_each_node_by_type(node, "display") { + if (!of_get_property(node, "linux,opened", NULL) || + !of_get_property(node, "linux,boot-display", NULL)) + continue; + dev = of_platform_device_create(node, "of-display", NULL); + if (WARN_ON(!dev)) + return -ENOMEM; + boot_display = node; + break; + } + for_each_node_by_type(node, "display") { + if (!of_get_property(node, "linux,opened", NULL) || node == boot_display) + continue; + of_platform_device_create(node, "of-display", NULL); + } + + } else { + /* + * Handle certain compatibles explicitly, since we don't want to create + * platform_devices for every node in /reserved-memory with a + * "compatible", + */ + for_each_matching_node(node, reserved_mem_matches) + of_platform_device_create(node, NULL, NULL); + + node = of_find_node_by_path("/firmware"); + if (node) { + of_platform_populate(node, NULL, NULL, NULL); + of_node_put(node); + } + + node = of_get_compatible_child(of_chosen, "simple-framebuffer"); of_platform_device_create(node, NULL, NULL); - - node = of_find_node_by_path("/firmware"); - if (node) { - of_platform_populate(node, NULL, NULL, NULL); of_node_put(node); + + /* Populate everything else. */ + of_platform_default_populate(NULL, NULL, NULL); } - node = of_get_compatible_child(of_chosen, "simple-framebuffer"); - of_platform_device_create(node, NULL, NULL); - of_node_put(node); - - /* Populate everything else. */ - of_platform_default_populate(NULL, NULL, NULL); - return 0; } arch_initcall_sync(of_platform_default_populate_init); @@ -558,7 +604,6 @@ static int __init of_platform_sync_state_init(void) return 0; } late_initcall_sync(of_platform_sync_state_init); -#endif int of_platform_device_destroy(struct device *dev, void *data) { diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index afdb6aa48add..b1acb1ebebe9 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -386,10 +386,10 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; } -static void __init offb_init_fb(const char *name, - int width, int height, int depth, - int pitch, unsigned long address, - int foreign_endian, struct device_node *dp) +static void offb_init_fb(struct platform_device *parent, const char *name, + int width, int height, int depth, + int pitch, unsigned long address, + int foreign_endian, struct device_node *dp) { unsigned long res_size = pitch * height; struct offb_par *par = &default_par; @@ -410,12 +410,13 @@ static void __init offb_init_fb(const char *name, return; } - info = framebuffer_alloc(sizeof(u32) * 16, NULL); + info = framebuffer_alloc(sizeof(u32) * 16, &parent->dev); if (!info) { release_mem_region(res_start, res_size); return; } + platform_set_drvdata(parent, info); fix = &info->fix; var = &info->var; @@ -535,7 +536,8 @@ out_aper: } -static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) +static void offb_init_nodriver(struct platform_device *parent, struct device_node *dp, + int no_real_node) { unsigned int len; int i, width = 640, height = 480, depth = 8, pitch = 640; @@ -650,46 +652,76 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) /* kludge for valkyrie */ if (of_node_name_eq(dp, "valkyrie")) address += 0x1000; - offb_init_fb(no_real_node ? "bootx" : NULL, + offb_init_fb(parent, no_real_node ? "bootx" : NULL, width, height, depth, pitch, address, foreign_endian, no_real_node ? 
NULL : dp); } } -static int __init offb_init(void) +static int offb_remove(struct platform_device *pdev) { - struct device_node *dp = NULL, *boot_disp = NULL; + struct fb_info *info = platform_get_drvdata(pdev); - if (fb_get_options("offb", NULL)) - return -ENODEV; - - /* Check if we have a MacOS display without a node spec */ - if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL) != NULL) { - /* The old code tried to work out which node was the MacOS - * display based on the address. I'm dropping that since the - * lack of a node spec only happens with old BootX versions - * (users can update) and with this code, they'll still get - * a display (just not the palette hacks). - */ - offb_init_nodriver(of_chosen, 1); - } - - for_each_node_by_type(dp, "display") { - if (of_get_property(dp, "linux,opened", NULL) && - of_get_property(dp, "linux,boot-display", NULL)) { - boot_disp = dp; - offb_init_nodriver(dp, 0); - } - } - for_each_node_by_type(dp, "display") { - if (of_get_property(dp, "linux,opened", NULL) && - dp != boot_disp) - offb_init_nodriver(dp, 0); - } + if (info) + unregister_framebuffer(info); return 0; } +static int offb_probe_bootx_noscreen(struct platform_device *pdev) +{ + offb_init_nodriver(pdev, of_chosen, 1); + return 0; +} + +static struct platform_driver offb_driver_bootx_noscreen = { + .driver = { + .name = "bootx-noscreen", + }, + .probe = offb_probe_bootx_noscreen, + .remove = offb_remove, +}; + +static int offb_probe_display(struct platform_device *pdev) +{ + offb_init_nodriver(pdev, pdev->dev.of_node, 0); + + return 0; +} + +static const struct of_device_id offb_of_match_display[] = { + { .compatible = "display", }, + { }, +}; +MODULE_DEVICE_TABLE(of, offb_of_match_display); + +static struct platform_driver offb_driver_display = { + .driver = { + .name = "of-display", + .of_match_table = offb_of_match_display, + }, + .probe = offb_probe_display, + .remove = offb_remove, +}; + +static int __init offb_init(void) +{ + if (fb_get_options("offb", NULL)) + return -ENODEV; + + platform_driver_register(&offb_driver_bootx_noscreen); + platform_driver_register(&offb_driver_display); + + return 0; +} module_init(offb_init); + +static void __exit offb_exit(void) +{ + platform_driver_unregister(&offb_driver_display); + platform_driver_unregister(&offb_driver_bootx_noscreen); +} +module_exit(offb_exit); + MODULE_LICENSE("GPL"); From b76ecff8317eed4818fdd449bacc81e05534f88b Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 19 Apr 2022 12:04:05 +0200 Subject: [PATCH 105/219] fbdev: Warn in hot-unplug workaround for framebuffers without device A workaround makes fbdev hot-unplugging work for framebuffers without device. The only user for this feature was offb. As each OF framebuffer now has an associated platform device, the workaround hould no longer be triggered. Update it with a warning and rewrite the comment. Fbdev drivers that trigger the hot-unplug workaround really need to be fixed. 
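A driver stays clear of the (now warning) workaround simply by giving its framebuffer a parent device, which is what the previous patch does for offb via framebuffer_alloc(..., &pdev->dev). A minimal sketch with an illustrative probe name:

  #include <linux/fb.h>
  #include <linux/platform_device.h>

  static int example_fb_probe(struct platform_device *pdev)
  {
          struct fb_info *info;

          /* Parent device set: the remove-conflicting-framebuffers path
           * can hot-unplug this framebuffer properly. Passing NULL here
           * would take the workaround path and now triggers the
           * "no device set" warning. */
          info = framebuffer_alloc(0, &pdev->dev);
          if (!info)
                  return -ENOMEM;

          /* ... fill in info->fix, info->var, info->fbops ... */

          return register_framebuffer(info);
  }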
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Suggested-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220419100405.12600-3-tzimmermann@suse.de --- drivers/video/fbdev/core/fbmem.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index bc6ed750e915..84427470367b 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1577,14 +1577,12 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, * allocate the memory range. * * If it's not a platform device, at least print a warning. A - * fix would add code to remove the device from the system. + * fix would add code to remove the device from the system. For + * framebuffers without any Linux device, print a warning as + * well. */ if (!device) { - /* TODO: Represent each OF framebuffer as its own - * device in the device hierarchy. For now, offb - * doesn't have such a device, so unregister the - * framebuffer as before without warning. - */ + pr_warn("fb%d: no device set\n", i); do_unregister_framebuffer(registered_fb[i]); } else if (dev_is_platform(device)) { registered_fb[i]->forced_out = true; From 681f8a5c6e372dbfd2a313ace417e7749543de1d Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 18 Apr 2022 17:09:36 +0200 Subject: [PATCH 106/219] drm/i915: Fix DISP_POS_Y and DISP_HEIGHT defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 428cb15d5b00 ("drm/i915: Clean up pre-skl primary plane registers") introduced DISP_POS_Y and DISP_HEIGHT defines but accidentally set these their masks to REG_GENMASK(31, 0) instead of REG_GENMASK(31, 16). This breaks the primary display pane on at least pineview machines, fix the mask to fix the primary display pane only showing black. Tested on an Acer One AO532h with an Intel N450 SoC. 
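To see why the wrong mask breaks the layout: DISP_POS_X()/DISP_POS_Y() pack two 16-bit fields into one 32-bit register, and REG_FIELD_PREP() shifts the value up to the lowest set bit of the mask. With a 31:0 mask the Y value is never shifted into the high half; it lands on top of the X field while the real Y bits stay zero. A standalone illustration, using simplified stand-ins for the i915 REG_GENMASK()/REG_FIELD_PREP() helpers (GCC/Clang builtin assumed):

  #include <stdint.h>
  #include <stdio.h>

  #define GENMASK(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
  #define FIELD_PREP(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

  int main(void)
  {
          uint32_t pos_x = FIELD_PREP(GENMASK(15, 0), 100);   /* X = 100 */

          /* Correct mask: Y goes to bits 31:16, X survives in 15:0. */
          uint32_t good = pos_x | FIELD_PREP(GENMASK(31, 16), 50);
          /* Buggy mask: Y is not shifted and corrupts the X field. */
          uint32_t bad  = pos_x | FIELD_PREP(GENMASK(31, 0), 50);

          /* prints good=0x00320064 bad=0x00000076 */
          printf("good=0x%08x bad=0x%08x\n", good, bad);
          return 0;
  }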
Fixes: 428cb15d5b00 ("drm/i915: Clean up pre-skl primary plane registers") Cc: José Roberto de Souza Cc: Ville Syrjälä Signed-off-by: Hans de Goede Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220418150936.5499-1-hdegoede@redhat.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 51f46fe45c72..5f1f38684d65 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4352,12 +4352,12 @@ #define _DSPAADDR 0x70184 #define _DSPASTRIDE 0x70188 #define _DSPAPOS 0x7018C /* reserved */ -#define DISP_POS_Y_MASK REG_GENMASK(31, 0) +#define DISP_POS_Y_MASK REG_GENMASK(31, 16) #define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y)) #define DISP_POS_X_MASK REG_GENMASK(15, 0) #define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x)) #define _DSPASIZE 0x70190 -#define DISP_HEIGHT_MASK REG_GENMASK(31, 0) +#define DISP_HEIGHT_MASK REG_GENMASK(31, 16) #define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h)) #define DISP_WIDTH_MASK REG_GENMASK(15, 0) #define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w)) From b962a068347533e72ddb60ace6d649a5b974485b Mon Sep 17 00:00:00 2001 From: Vinod Govindapillai Date: Sun, 17 Apr 2022 12:31:05 +0300 Subject: [PATCH 107/219] drm/i915: program wm blocks to at least blocks required per line MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In configurations with single DRAM channel, for usecases like 4K 60 Hz, FIFO underruns are observed quite frequently. Looks like the wm0 watermark values need to bumped up because the wm0 memory latency calculations are probably not taking the DRAM channel's impact into account. As per the Bspec 49325, if the ddb allocation can hold at least one plane_blocks_per_line we should have selected method2. Assuming that modern HW versions have enough dbuf to hold at least one line, set the wm blocks to equivalent to blocks per line. v2: styling and comments changes (Ville) v3: Updated the reviewed-by tag v4: max_t to max and patch styling (Ville) References: https://gitlab.freedesktop.org/drm/intel/-/issues/4321 Cc: Ville Syrjälä Cc: Stanislav Lisovskiy Signed-off-by: Vinod Govindapillai Reviewed-by: Stanislav Lisovskiy Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220417093105.729014-1-vinod.govindapillai@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 594ab59e4991..ee0047fdc95d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5475,6 +5475,25 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } blocks = fixed16_to_u32_round_up(selected_result) + 1; + /* + * Lets have blocks at minimum equivalent to plane_blocks_per_line + * as there will be at minimum one line for lines configuration. This + * is a work around for FIFO underruns observed with resolutions like + * 4k 60 Hz in single channel DRAM configurations. + * + * As per the Bspec 49325, if the ddb allocation can hold at least + * one plane_blocks_per_line, we should have selected method2 in + * the above logic. Assuming that modern versions have enough dbuf + * and method2 guarantees blocks equivalent to at least 1 line, + * select the blocks as plane_blocks_per_line. 
+ * + * TODO: Revisit the logic when we have better understanding on DRAM + * channels' impact on the level 0 memory latency and the relevant + * wm calculations. + */ + if (skl_wm_has_lines(dev_priv, level)) + blocks = max(blocks, + fixed16_to_u32_round_up(wp->plane_blocks_per_line)); lines = div_round_up_fixed16(selected_result, wp->plane_blocks_per_line); From d90502d2ef99366d7d7c2bd9503165ec5baf590c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 21 Mar 2022 21:50:04 +0200 Subject: [PATCH 108/219] drm/i915: Program i830 DPLL FP register later MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow the new i9xx DPLL FP register programming sequence introduced in commit 62d66b218386 ("drm/i915: Fold i9xx_set_pll_dividers() into i9xx_enable_pll()") in the i830 "power well" code as well. Just for consistency. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220321195006.775-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 29044cf58b87..fc5c091c415a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9844,9 +9844,6 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; - intel_de_write(dev_priv, FP0(pipe), fp); - intel_de_write(dev_priv, FP1(pipe), fp); - intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); @@ -9855,6 +9852,9 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); + intel_de_write(dev_priv, FP0(pipe), fp); + intel_de_write(dev_priv, FP1(pipe), fp); + /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old From 1e53f9e41400e548a1112bc9b973eabddc9eb79b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 19 Apr 2022 11:27:52 -0700 Subject: [PATCH 109/219] drm/i915/display: Add workaround 22014263786 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This workaround fixes screen flickers with FBC. 
BSpec: 33450 BSpec: 52890 BSpec: 54369 BSpec: 66624 Reviewed-by: Matt Roper Cc: Matt Roper Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220419182753.364237-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_fbc.c | 9 +++++++++ drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 670835318a1f..b7bdb0739744 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -811,6 +811,14 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc) fbc->funcs->program_cfb(fbc); } +static void intel_fbc_program_workarounds(struct intel_fbc *fbc) +{ + /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,dg2,adlp */ + if (DISPLAY_VER(fbc->i915) >= 11) + intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0, + DPFC_CHICKEN_FORCE_SLB_INVALIDATION); +} + static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; @@ -1462,6 +1470,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state, intel_fbc_update_state(state, crtc, plane); + intel_fbc_program_workarounds(fbc); intel_fbc_program_cfb(fbc); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5f1f38684d65..aa0062d07269 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1400,6 +1400,7 @@ #define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ #define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ #define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ +#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */ #define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ #define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268) From 36bf0611600dd3187fe478833ba804e7976fcae5 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 19 Apr 2022 23:48:19 +0200 Subject: [PATCH 110/219] dt-bindings: display: ssd1307fb: Deprecate "-i2c" compatible strings The current compatible strings for SSD130x I2C controllers contain both an "fb" and "-i2c" suffixes. It seems to indicate that are for a fbdev driver and also that are for devices that can be accessed over an I2C bus. But a DT is supposed to describe the hardware and not Linux implementation details. So let's deprecate those compatible strings and add new ones that only contain the vendor and device name, without any of these suffixes. These will just describe the device and can be matched by both I2C and SPI DRM drivers. The required properties should still be enforced for old ones. While being there, just drop the "sinowealth,sh1106-i2c" compatible string since that was never present in a released Linux version. 
Signed-off-by: Javier Martinez Canillas Acked-by: Mark Brown Reviewed-by: Geert Uytterhoeven Reviewed-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20220419214824.335075-2-javierm@redhat.com --- .../bindings/display/solomon,ssd1307fb.yaml | 44 +++++++++++++------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml index ade61d502edd..7653b6c3fcb6 100644 --- a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml +++ b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml @@ -12,12 +12,22 @@ maintainers: properties: compatible: - enum: - - sinowealth,sh1106-i2c - - solomon,ssd1305fb-i2c - - solomon,ssd1306fb-i2c - - solomon,ssd1307fb-i2c - - solomon,ssd1309fb-i2c + oneOf: + # Deprecated compatible strings + - items: + - enum: + - solomon,ssd1305fb-i2c + - solomon,ssd1306fb-i2c + - solomon,ssd1307fb-i2c + - solomon,ssd1309fb-i2c + deprecated: true + - items: + - enum: + - sinowealth,sh1106 + - solomon,ssd1305 + - solomon,ssd1306 + - solomon,ssd1307 + - solomon,ssd1309 reg: maxItems: 1 @@ -136,7 +146,7 @@ allOf: properties: compatible: contains: - const: sinowealth,sh1106-i2c + const: sinowealth,sh1106 then: properties: solomon,dclk-div: @@ -148,7 +158,9 @@ allOf: properties: compatible: contains: - const: solomon,ssd1305fb-i2c + enum: + - solomon,ssd1305-i2c + - solomon,ssd1305 then: properties: solomon,dclk-div: @@ -160,7 +172,9 @@ allOf: properties: compatible: contains: - const: solomon,ssd1306fb-i2c + enum: + - solomon,ssd1306-i2c + - solomon,ssd1306 then: properties: solomon,dclk-div: @@ -172,7 +186,9 @@ allOf: properties: compatible: contains: - const: solomon,ssd1307fb-i2c + enum: + - solomon,ssd1307-i2c + - solomon,ssd1307 then: properties: solomon,dclk-div: @@ -186,7 +202,9 @@ allOf: properties: compatible: contains: - const: solomon,ssd1309fb-i2c + enum: + - solomon,ssd1309-i2c + - solomon,ssd1309 then: properties: solomon,dclk-div: @@ -203,14 +221,14 @@ examples: #size-cells = <0>; ssd1307: oled@3c { - compatible = "solomon,ssd1307fb-i2c"; + compatible = "solomon,ssd1307"; reg = <0x3c>; pwms = <&pwm 4 3000>; reset-gpios = <&gpio2 7>; }; ssd1306: oled@3d { - compatible = "solomon,ssd1306fb-i2c"; + compatible = "solomon,ssd1306"; reg = <0x3c>; pwms = <&pwm 4 3000>; reset-gpios = <&gpio2 7>; From 1b6a7961908882b65def0bd2cffba3f2eba44226 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 19 Apr 2022 23:48:20 +0200 Subject: [PATCH 111/219] dt-bindings: display: ssd1307fb: Extend schema for SPI controllers The Solomon SSD130x OLED displays can either have an I2C or SPI interface, add to the schema the properties and examples for OLED devices under SPI. 
Signed-off-by: Javier Martinez Canillas Acked-by: Mark Brown Reviewed-by: Geert Uytterhoeven Reviewed-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20220419214824.335075-3-javierm@redhat.com --- .../bindings/display/solomon,ssd1307fb.yaml | 42 ++++++++++++++++++- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml index 7653b6c3fcb6..3fbd87c2c120 100644 --- a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml +++ b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml @@ -38,9 +38,20 @@ properties: reset-gpios: maxItems: 1 + # Only required for SPI + dc-gpios: + description: + GPIO connected to the controller's D/C# (Data/Command) pin, + that is needed for 4-wire SPI to tell the controller if the + data sent is for a command register or the display data RAM + maxItems: 1 + vbat-supply: description: The supply for VBAT + # Only required for SPI + spi-max-frequency: true + solomon,height: $ref: /schemas/types.yaml#/definitions/uint32 default: 16 @@ -220,14 +231,14 @@ examples: #address-cells = <1>; #size-cells = <0>; - ssd1307: oled@3c { + ssd1307_i2c: oled@3c { compatible = "solomon,ssd1307"; reg = <0x3c>; pwms = <&pwm 4 3000>; reset-gpios = <&gpio2 7>; }; - ssd1306: oled@3d { + ssd1306_i2c: oled@3d { compatible = "solomon,ssd1306"; reg = <0x3c>; pwms = <&pwm 4 3000>; @@ -238,3 +249,30 @@ examples: solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>; }; }; + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + ssd1307_spi: oled@0 { + compatible = "solomon,ssd1307"; + reg = <0x0>; + pwms = <&pwm 4 3000>; + reset-gpios = <&gpio2 7>; + dc-gpios = <&gpio2 8>; + spi-max-frequency = <10000000>; + }; + + ssd1306_spi: oled@1 { + compatible = "solomon,ssd1306"; + reg = <0x1>; + pwms = <&pwm 4 3000>; + reset-gpios = <&gpio2 7>; + dc-gpios = <&gpio2 8>; + spi-max-frequency = <10000000>; + solomon,com-lrremap; + solomon,com-invdir; + solomon,com-offset = <32>; + solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>; + }; + }; From fb197474eddc9369492228f1e423f4ccf0f309b4 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 19 Apr 2022 23:48:21 +0200 Subject: [PATCH 112/219] drm/solomon: Add ssd130x new compatible strings and deprecate old ones. The current compatible strings for SSD130x I2C controllers contain an "fb" and "-i2c" suffixes. These have been deprecated and more correct ones were added, that don't encode a subsystem or bus used to interface the devices. 
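Keeping the deprecated entries costs little because the old and new compatibles share the same .data in the of_device_id table, so a probe path that looks up the match data does not care which string the device tree used. An illustrative helper (not the ssd130x core's literal code), assuming device_get_match_data():

  #include <linux/property.h>

  #include "ssd130x.h"

  /* Returns the same deviceinfo whether the node says "solomon,ssd1306"
   * or the deprecated "solomon,ssd1306fb-i2c", since both of_device_id
   * entries carry the same .data pointer. */
  static const struct ssd130x_deviceinfo *example_get_deviceinfo(struct device *dev)
  {
          return device_get_match_data(dev);
  }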
Signed-off-by: Javier Martinez Canillas Acked-by: Mark Brown Reviewed-by: Geert Uytterhoeven Link: https://patchwork.freedesktop.org/patch/msgid/20220419214824.335075-4-javierm@redhat.com --- drivers/gpu/drm/solomon/ssd130x-i2c.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c index d099b241dd3f..45867ef2bc8b 100644 --- a/drivers/gpu/drm/solomon/ssd130x-i2c.c +++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c @@ -88,9 +88,26 @@ static struct ssd130x_deviceinfo ssd130x_ssd1309_deviceinfo = { static const struct of_device_id ssd130x_of_match[] = { { - .compatible = "sinowealth,sh1106-i2c", + .compatible = "sinowealth,sh1106", .data = &ssd130x_sh1106_deviceinfo, }, + { + .compatible = "solomon,ssd1305", + .data = &ssd130x_ssd1305_deviceinfo, + }, + { + .compatible = "solomon,ssd1306", + .data = &ssd130x_ssd1306_deviceinfo, + }, + { + .compatible = "solomon,ssd1307", + .data = &ssd130x_ssd1307_deviceinfo, + }, + { + .compatible = "solomon,ssd1309", + .data = &ssd130x_ssd1309_deviceinfo, + }, + /* Deprecated but kept for backward compatibility */ { .compatible = "solomon,ssd1305fb-i2c", .data = &ssd130x_ssd1305_deviceinfo, From 4203e88ba80bbcdfaa7689db286d07cf4f2993d0 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 19 Apr 2022 23:48:22 +0200 Subject: [PATCH 113/219] drm/solomon: Move device info from ssd130x-i2c to the core driver These are declared in the ssd130x-i2c transport driver but the information is not I2C specific, and could be used by other SSD130x transport drivers. Move them to the ssd130x core driver and just set the OF device entries to an ID that could be used to lookup the correct device info from an array. While being there, also move the SSD130X_DATA and SSD130X_COMMAND control bytes. Since even though they are used by the I2C interface, they could also be useful for other transport protocols such as SPI. 
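Condensed, the sharing pattern introduced below is: the core driver exports the variant table under a symbol namespace, and each transport driver imports that namespace and points its match entries at table slots. The fragments here are trimmed from the diff that follows:

  /* ssd130x.c (core) */
  const struct ssd130x_deviceinfo ssd130x_variants[] = {
          [SSD1306_ID] = { .default_vcomh = 0x20, /* ... */ },
          /* ... other variants ... */
  };
  EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);

  /* ssd130x-i2c.c (transport) */
  static const struct of_device_id ssd130x_of_match[] = {
          { .compatible = "solomon,ssd1306", .data = &ssd130x_variants[SSD1306_ID] },
          { /* sentinel */ }
  };
  MODULE_IMPORT_NS(DRM_SSD130X);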
Suggested-by: Chen-Yu Tsai Signed-off-by: Javier Martinez Canillas Acked-by: Mark Brown Reviewed-by: Geert Uytterhoeven Link: https://patchwork.freedesktop.org/patch/msgid/20220419214824.335075-5-javierm@redhat.com --- drivers/gpu/drm/solomon/ssd130x-i2c.c | 52 ++++++--------------------- drivers/gpu/drm/solomon/ssd130x.c | 35 ++++++++++++++++-- drivers/gpu/drm/solomon/ssd130x.h | 14 ++++++++ 3 files changed, 56 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c index 45867ef2bc8b..d6835ec71c39 100644 --- a/drivers/gpu/drm/solomon/ssd130x-i2c.c +++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c @@ -53,76 +53,43 @@ static void ssd130x_i2c_shutdown(struct i2c_client *client) ssd130x_shutdown(ssd130x); } -static struct ssd130x_deviceinfo ssd130x_sh1106_deviceinfo = { - .default_vcomh = 0x40, - .default_dclk_div = 1, - .default_dclk_frq = 5, - .page_mode_only = 1, -}; - -static struct ssd130x_deviceinfo ssd130x_ssd1305_deviceinfo = { - .default_vcomh = 0x34, - .default_dclk_div = 1, - .default_dclk_frq = 7, -}; - -static struct ssd130x_deviceinfo ssd130x_ssd1306_deviceinfo = { - .default_vcomh = 0x20, - .default_dclk_div = 1, - .default_dclk_frq = 8, - .need_chargepump = 1, -}; - -static struct ssd130x_deviceinfo ssd130x_ssd1307_deviceinfo = { - .default_vcomh = 0x20, - .default_dclk_div = 2, - .default_dclk_frq = 12, - .need_pwm = 1, -}; - -static struct ssd130x_deviceinfo ssd130x_ssd1309_deviceinfo = { - .default_vcomh = 0x34, - .default_dclk_div = 1, - .default_dclk_frq = 10, -}; - static const struct of_device_id ssd130x_of_match[] = { { .compatible = "sinowealth,sh1106", - .data = &ssd130x_sh1106_deviceinfo, + .data = &ssd130x_variants[SH1106_ID], }, { .compatible = "solomon,ssd1305", - .data = &ssd130x_ssd1305_deviceinfo, + .data = &ssd130x_variants[SSD1305_ID], }, { .compatible = "solomon,ssd1306", - .data = &ssd130x_ssd1306_deviceinfo, + .data = &ssd130x_variants[SSD1306_ID], }, { .compatible = "solomon,ssd1307", - .data = &ssd130x_ssd1307_deviceinfo, + .data = &ssd130x_variants[SSD1307_ID], }, { .compatible = "solomon,ssd1309", - .data = &ssd130x_ssd1309_deviceinfo, + .data = &ssd130x_variants[SSD1309_ID], }, /* Deprecated but kept for backward compatibility */ { .compatible = "solomon,ssd1305fb-i2c", - .data = &ssd130x_ssd1305_deviceinfo, + .data = &ssd130x_variants[SSD1305_ID], }, { .compatible = "solomon,ssd1306fb-i2c", - .data = &ssd130x_ssd1306_deviceinfo, + .data = &ssd130x_variants[SSD1306_ID], }, { .compatible = "solomon,ssd1307fb-i2c", - .data = &ssd130x_ssd1307_deviceinfo, + .data = &ssd130x_variants[SSD1307_ID], }, { .compatible = "solomon,ssd1309fb-i2c", - .data = &ssd130x_ssd1309_deviceinfo, + .data = &ssd130x_variants[SSD1309_ID], }, { /* sentinel */ } }; @@ -142,3 +109,4 @@ module_i2c_driver(ssd130x_i2c_driver); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Javier Martinez Canillas "); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(DRM_SSD130X); diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index a7e784518c69..ba2de93d00f0 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -39,9 +39,6 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -#define SSD130X_DATA 0x40 -#define SSD130X_COMMAND 0x80 - #define SSD130X_PAGE_COL_START_LOW 0x00 #define SSD130X_PAGE_COL_START_HIGH 0x10 #define SSD130X_SET_ADDRESS_MODE 0x20 @@ -94,6 +91,38 @@ #define MAX_CONTRAST 255 +const struct ssd130x_deviceinfo ssd130x_variants[] = { + [SH1106_ID] = { + 
.default_vcomh = 0x40, + .default_dclk_div = 1, + .default_dclk_frq = 5, + .page_mode_only = 1, + }, + [SSD1305_ID] = { + .default_vcomh = 0x34, + .default_dclk_div = 1, + .default_dclk_frq = 7, + }, + [SSD1306_ID] = { + .default_vcomh = 0x20, + .default_dclk_div = 1, + .default_dclk_frq = 8, + .need_chargepump = 1, + }, + [SSD1307_ID] = { + .default_vcomh = 0x20, + .default_dclk_div = 2, + .default_dclk_frq = 12, + .need_pwm = 1, + }, + [SSD1309_ID] = { + .default_vcomh = 0x34, + .default_dclk_div = 1, + .default_dclk_frq = 10, + } +}; +EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X); + static inline struct ssd130x_device *drm_to_ssd130x(struct drm_device *drm) { return container_of(drm, struct ssd130x_device, drm); diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h index f5b062576fdf..d14f78c2eb07 100644 --- a/drivers/gpu/drm/solomon/ssd130x.h +++ b/drivers/gpu/drm/solomon/ssd130x.h @@ -18,6 +18,18 @@ #include +#define SSD130X_DATA 0x40 +#define SSD130X_COMMAND 0x80 + +enum ssd130x_variants { + SH1106_ID, + SSD1305_ID, + SSD1306_ID, + SSD1307_ID, + SSD1309_ID, + NR_SSD130X_VARIANTS +}; + struct ssd130x_deviceinfo { u32 default_vcomh; u32 default_dclk_div; @@ -71,6 +83,8 @@ struct ssd130x_device { u8 page_end; }; +extern const struct ssd130x_deviceinfo ssd130x_variants[]; + struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap); int ssd130x_remove(struct ssd130x_device *ssd130x); void ssd130x_shutdown(struct ssd130x_device *ssd130x); From 74373977d2ca26e5735377f8874be70bc2f030f5 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 19 Apr 2022 23:48:23 +0200 Subject: [PATCH 114/219] drm/solomon: Add SSD130x OLED displays SPI support The ssd130x driver only provides the core support for these devices but it does not have any bus transport logic. Add a driver to interface over SPI. There is a difference in the communication protocol when using 4-wire SPI instead of I2C. For the latter, a control byte that contains a D/C# field has to be sent. This field tells the controller whether the data has to be written to the command register or to the graphics display data memory. But for 4-wire SPI that control byte is not used, instead a real D/C# line must be pulled HIGH for commands data and LOW for graphics display data. For this reason the standard SPI regmap can't be used and a custom .write bus handler is needed. Signed-off-by: Javier Martinez Canillas Acked-by: Mark Brown Reviewed-by: Geert Uytterhoeven Link: https://patchwork.freedesktop.org/patch/msgid/20220419214824.335075-6-javierm@redhat.com --- drivers/gpu/drm/solomon/Kconfig | 9 ++ drivers/gpu/drm/solomon/Makefile | 1 + drivers/gpu/drm/solomon/ssd130x-spi.c | 178 ++++++++++++++++++++++++++ 3 files changed, 188 insertions(+) create mode 100644 drivers/gpu/drm/solomon/ssd130x-spi.c diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig index 8c0a0c788385..e170716d976b 100644 --- a/drivers/gpu/drm/solomon/Kconfig +++ b/drivers/gpu/drm/solomon/Kconfig @@ -20,3 +20,12 @@ config DRM_SSD130X_I2C I2C bus. If M is selected the module will be called ssd130x-i2c. + +config DRM_SSD130X_SPI + tristate "DRM support for Solomon SSD130X OLED displays (SPI bus)" + depends on DRM_SSD130X && SPI + select REGMAP + help + Say Y here if the SSD130x OLED display is connected via SPI bus. + + If M is selected the module will be called ssd130x-spi. 
diff --git a/drivers/gpu/drm/solomon/Makefile b/drivers/gpu/drm/solomon/Makefile index 4bfc5acb0447..b5fc792257d7 100644 --- a/drivers/gpu/drm/solomon/Makefile +++ b/drivers/gpu/drm/solomon/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_DRM_SSD130X) += ssd130x.o obj-$(CONFIG_DRM_SSD130X_I2C) += ssd130x-i2c.o +obj-$(CONFIG_DRM_SSD130X_SPI) += ssd130x-spi.o diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c new file mode 100644 index 000000000000..43722adab1f8 --- /dev/null +++ b/drivers/gpu/drm/solomon/ssd130x-spi.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DRM driver for Solomon SSD130X OLED displays (SPI bus) + * + * Copyright 2022 Red Hat Inc. + * Authors: Javier Martinez Canillas + */ +#include +#include + +#include "ssd130x.h" + +#define DRIVER_NAME "ssd130x-spi" +#define DRIVER_DESC "DRM driver for Solomon SSD130X OLED displays (SPI)" + +struct ssd130x_spi_transport { + struct spi_device *spi; + struct gpio_desc *dc; +}; + +static const struct regmap_config ssd130x_spi_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +/* + * The regmap bus .write handler, it is just a wrapper around spi_write() + * but toggling the Data/Command control pin (D/C#). Since for 4-wire SPI + * a D/C# pin is used, in contrast with I2C where a control byte is sent, + * prior to every data byte, that contains a bit with the D/C# value. + * + * These control bytes are considered registers by the ssd130x core driver + * and can be used by the ssd130x SPI driver to determine if the data sent + * is for a command register or for the Graphic Display Data RAM (GDDRAM). + */ +static int ssd130x_spi_write(void *context, const void *data, size_t count) +{ + struct ssd130x_spi_transport *t = context; + struct spi_device *spi = t->spi; + const u8 *reg = data; + + if (*reg == SSD130X_COMMAND) + gpiod_set_value_cansleep(t->dc, 0); + + if (*reg == SSD130X_DATA) + gpiod_set_value_cansleep(t->dc, 1); + + /* Remove control byte since is not used in a 4-wire SPI interface */ + return spi_write(spi, reg + 1, count - 1); +} + +/* The ssd130x driver does not read registers but regmap expects a .read */ +static int ssd130x_spi_read(void *context, const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + return -EOPNOTSUPP; +} + +/* + * A custom bus is needed due the special write that toggles a D/C# pin, + * another option could be to just have a .reg_write() callback but that + * will prevent to do data writes in bulk. + * + * Once the regmap API is extended to support defining a bulk write handler + * in the struct regmap_config, this can be simplified and the bus dropped. 
+ */ +static struct regmap_bus regmap_ssd130x_spi_bus = { + .write = ssd130x_spi_write, + .read = ssd130x_spi_read, +}; + +static int ssd130x_spi_probe(struct spi_device *spi) +{ + struct ssd130x_spi_transport *t; + struct ssd130x_device *ssd130x; + struct regmap *regmap; + struct gpio_desc *dc; + struct device *dev = &spi->dev; + + dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW); + if (IS_ERR(dc)) + return dev_err_probe(dev, PTR_ERR(dc), + "Failed to get dc gpio\n"); + + t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL); + if (!t) + return dev_err_probe(dev, -ENOMEM, + "Failed to allocate SPI transport data\n"); + + t->spi = spi; + t->dc = dc; + + regmap = devm_regmap_init(dev, ®map_ssd130x_spi_bus, t, + &ssd130x_spi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ssd130x = ssd130x_probe(dev, regmap); + if (IS_ERR(ssd130x)) + return PTR_ERR(ssd130x); + + spi_set_drvdata(spi, ssd130x); + + return 0; +} + +static void ssd130x_spi_remove(struct spi_device *spi) +{ + struct ssd130x_device *ssd130x = spi_get_drvdata(spi); + + ssd130x_remove(ssd130x); +} + +static void ssd130x_spi_shutdown(struct spi_device *spi) +{ + struct ssd130x_device *ssd130x = spi_get_drvdata(spi); + + ssd130x_shutdown(ssd130x); +} + +static const struct of_device_id ssd130x_of_match[] = { + { + .compatible = "sinowealth,sh1106", + .data = &ssd130x_variants[SH1106_ID], + }, + { + .compatible = "solomon,ssd1305", + .data = &ssd130x_variants[SSD1305_ID], + }, + { + .compatible = "solomon,ssd1306", + .data = &ssd130x_variants[SSD1306_ID], + }, + { + .compatible = "solomon,ssd1307", + .data = &ssd130x_variants[SSD1307_ID], + }, + { + .compatible = "solomon,ssd1309", + .data = &ssd130x_variants[SSD1309_ID], + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ssd130x_of_match); + +/* + * The SPI core always reports a MODALIAS uevent of the form "spi:", even + * if the device was registered via OF. This means that the module will not be + * auto loaded, unless it contains an alias that matches the MODALIAS reported. + * + * To workaround this issue, add a SPI device ID table. Even when this should + * not be needed for this driver to match the registered SPI devices. + */ +static const struct spi_device_id ssd130x_spi_table[] = { + { "sh1106", SH1106_ID }, + { "ssd1305", SSD1305_ID }, + { "ssd1306", SSD1306_ID }, + { "ssd1307", SSD1307_ID }, + { "ssd1309", SSD1309_ID }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, ssd130x_spi_table); + +static struct spi_driver ssd130x_spi_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = ssd130x_of_match, + }, + .probe = ssd130x_spi_probe, + .remove = ssd130x_spi_remove, + .shutdown = ssd130x_spi_shutdown, +}; +module_spi_driver(ssd130x_spi_driver); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Javier Martinez Canillas "); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(DRM_SSD130X); From 2cef35958da89500f30e2693b4f8ca898e27ae34 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:40 +0300 Subject: [PATCH 115/219] drm/i915: Move per-platform power well hooks to intel_display_power_well.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the implementation of platform specific power well hooks to intel_display_power_well.c, to reduce the clutter in intel_display_power.c. The locking of all the power domain/power well state is handled in the power domain functions in intel_display_power.c using i915_power_domains::lock. 
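The resulting split, as a simplified fragment (illustrative function name, condensed from the real call chain): callers in intel_display_power.c keep taking i915_power_domains::lock, while the per-platform hook they invoke now lives in intel_display_power_well.c:

  /* intel_display_power.c, simplified */
  static void power_well_get_locked(struct drm_i915_private *i915,
                                    struct i915_power_well *power_well)
  {
          struct i915_power_domains *power_domains = &i915->power_domains;

          mutex_lock(&power_domains->lock);
          if (!power_well->count++)
                  /* hook implemented in intel_display_power_well.c */
                  power_well->desc->ops->enable(i915, power_well);
          mutex_unlock(&power_domains->lock);
  }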
This patch also moves the chy_phy_powergate_ch/lanes() functions to intel_display_power_well.c which borrow the same lock to protect the DISPLAY_PHY_CONTROL register state, which the HW uses both for toggling power wells and power gating PHY lanes. No functional change. v2: - Clarify in the commit log why CHV functions using the i915_power_domains::lock were moved, while others locking the power domain/well state were kept in intel_display_power.c . (Jouni) - Move forward declaration of chv_phy_powergate_ch/lanes() to intel_display_power_well.h . Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-1-imre.deak@intel.com --- .../drm/i915/display/intel_display_power.c | 1747 ---------------- .../drm/i915/display/intel_display_power.h | 5 - .../i915/display/intel_display_power_well.c | 1806 +++++++++++++++++ .../i915/display/intel_display_power_well.h | 62 +- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 1 + drivers/gpu/drm/i915/display/intel_pps.c | 1 + 6 files changed, 1835 insertions(+), 1787 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 6a5695008f7c..b3e8ede90039 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -9,24 +9,16 @@ #include "i915_irq.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" -#include "intel_combo_phy_regs.h" -#include "intel_crt.h" #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dmc.h" -#include "intel_dpio_phy.h" -#include "intel_dpll.h" -#include "intel_hotplug.h" #include "intel_mchbar_regs.h" #include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pm.h" -#include "intel_pps.h" #include "intel_snps_phy.h" -#include "intel_tc.h" -#include "intel_vga.h" #include "vlv_sideband.h" const char * @@ -235,604 +227,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, return ret; } -/* - * Starting with Haswell, we have a "Power Down Well" that can be turned off - * when not needed anymore. We have 4 registers that can request the power well - * to be enabled, and it will only be disabled if none of the registers is - * requesting it to be enabled. - */ -static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, - u8 irq_pipe_mask, bool has_vga) -{ - if (has_vga) - intel_vga_reset_io_mem(dev_priv); - - if (irq_pipe_mask) - gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); -} - -static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, - u8 irq_pipe_mask) -{ - if (irq_pipe_mask) - gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); -} - -#define ICL_AUX_PW_TO_CH(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) - -#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) - -static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) -{ - int pw_idx = power_well->desc->hsw.idx; - - return power_well->desc->hsw.is_tc_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : - ICL_AUX_PW_TO_CH(pw_idx); -} - -static struct intel_digital_port * -aux_ch_to_digital_port(struct drm_i915_private *dev_priv, - enum aux_ch aux_ch) -{ - struct intel_digital_port *dig_port = NULL; - struct intel_encoder *encoder; - - for_each_intel_encoder(&dev_priv->drm, encoder) { - /* We'll check the MST primary port */ - if (encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - dig_port = enc_to_dig_port(encoder); - if (!dig_port) - continue; - - if (dig_port->aux_ch != aux_ch) { - dig_port = NULL; - continue; - } - - break; - } - - return dig_port; -} - -static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, - const struct i915_power_well *power_well) -{ - enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); - struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); - - return intel_port_to_phy(i915, dig_port->base.port); -} - -static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - bool timeout_expected) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - int enable_delay = power_well->desc->hsw.fixed_enable_delay; - - /* - * For some power wells we're not supposed to watch the status bit for - * an ack, but rather just wait a fixed amount of time and then - * proceed. This is only used on DG2. - */ - if (IS_DG2(dev_priv) && enable_delay) { - usleep_range(enable_delay, 2 * enable_delay); - return; - } - - /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ - if (intel_de_wait_for_set(dev_priv, regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { - drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", - intel_power_well_name(power_well)); - - drm_WARN_ON(&dev_priv->drm, !timeout_expected); - - } -} - -static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, - const struct i915_power_well_regs *regs, - int pw_idx) -{ - u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); - u32 ret; - - ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; - ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; - if (regs->kvmr.reg) - ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; - ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; - - return ret; -} - -static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - bool disabled; - u32 reqs; - - /* - * Bspec doesn't require waiting for PWs to get disabled, but still do - * this for paranoia. The known cases where a PW will be forced on: - * - a KVMR request on any power well via the KVMR request register - * - a DMC request on PW1 and MISC_IO power wells via the BIOS and - * DEBUG request registers - * Skip the wait in case any of the request bits are set and print a - * diagnostic message. 
- */ - wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & - HSW_PWR_WELL_CTL_STATE(pw_idx))) || - (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); - if (disabled) - return; - - drm_dbg_kms(&dev_priv->drm, - "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", - intel_power_well_name(power_well), - !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); -} - -static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, - enum skl_power_gate pg) -{ - /* Timeout 5us for PG#0, for other PGs 1us */ - drm_WARN_ON(&dev_priv->drm, - intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, - SKL_FUSE_PG_DIST_STATUS(pg), 1)); -} - -static void hsw_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - u32 val; - - if (power_well->desc->hsw.has_fuses) { - enum skl_power_gate pg; - - pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : - SKL_PW_CTL_IDX_TO_PG(pw_idx); - - /* Wa_16013190616:adlp */ - if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); - - /* - * For PW1 we have to wait both for the PW0/PG0 fuse state - * before enabling the power well and PW1/PG1's own fuse - * state after the enabling. For all other power wells with - * fuses we only have to wait for that PW/PG's fuse state - * after the enabling. - */ - if (pg == SKL_PG1) - gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); - } - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - - hsw_wait_for_power_well_enable(dev_priv, power_well, false); - - if (power_well->desc->hsw.has_fuses) { - enum skl_power_gate pg; - - pg = DISPLAY_VER(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : - SKL_PW_CTL_IDX_TO_PG(pw_idx); - gen9_wait_for_power_well_fuses(dev_priv, pg); - } - - hsw_power_well_post_enable(dev_priv, - power_well->desc->hsw.irq_pipe_mask, - power_well->desc->hsw.has_vga); -} - -static void hsw_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - u32 val; - - hsw_power_well_pre_disable(dev_priv, - power_well->desc->hsw.irq_pipe_mask); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); - hsw_wait_for_power_well_disable(dev_priv, power_well); -} - -static void -icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; - - drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - - if (DISPLAY_VER(dev_priv) < 12) { - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val | ICL_LANE_ENABLE_AUX); - } - - hsw_wait_for_power_well_enable(dev_priv, power_well, false); - - /* Display WA #1178: icl */ - if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { - val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); - val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; - intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); - } -} - -static void -icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; - - drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val & ~ICL_LANE_ENABLE_AUX); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); - - hsw_wait_for_power_well_disable(dev_priv, power_well); -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) - -static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - struct intel_digital_port *dig_port) -{ - if (drm_WARN_ON(&dev_priv->drm, !dig_port)) - return; - - if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) - return; - - drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); -} - -#else - -static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - struct intel_digital_port *dig_port) -{ -} - -#endif - -#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) - -static void icl_tc_cold_exit(struct drm_i915_private *i915) -{ - int ret, tries = 0; - - while (1) { - ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, - 250, 1); - if (ret != -EAGAIN || ++tries == 3) - break; - msleep(1); - } - - /* Spec states that TC cold exit can take up to 1ms to complete */ - if (!ret) - 
msleep(1); - - /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ - drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : - "succeeded"); -} - -static void -icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); - struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - bool is_tbt = power_well->desc->hsw.is_tc_tbt; - bool timeout_expected; - u32 val; - - icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); - - val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); - val &= ~DP_AUX_CH_CTL_TBT_IO; - if (is_tbt) - val |= DP_AUX_CH_CTL_TBT_IO; - intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)); - - /* - * An AUX timeout is expected if the TBT DP tunnel is down, - * or need to enable AUX on a legacy TypeC port as part of the TC-cold - * exit sequence. - */ - timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); - if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) - icl_tc_cold_exit(dev_priv); - - hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); - - if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { - enum tc_port tc_port; - - tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - - if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), - DKL_CMN_UC_DW27_UC_HEALTH, 1)) - drm_warn(&dev_priv->drm, - "Timeout waiting TC uC health\n"); - } -} - -static void -icl_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - - if (intel_phy_is_tc(dev_priv, phy)) - return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); - else if (IS_ICELAKE(dev_priv)) - return icl_combo_phy_aux_power_well_enable(dev_priv, - power_well); - else - return hsw_power_well_enable(dev_priv, power_well); -} - -static void -icl_aux_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - - if (intel_phy_is_tc(dev_priv, phy)) - return hsw_power_well_disable(dev_priv, power_well); - else if (IS_ICELAKE(dev_priv)) - return icl_combo_phy_aux_power_well_disable(dev_priv, - power_well); - else - return hsw_power_well_disable(dev_priv, power_well); -} - -/* - * We should only use the power well if we explicitly asked the hardware to - * enable it, so check if it's enabled and also check if we've requested it to - * be enabled. - */ -static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - enum i915_power_well_id id = power_well->desc->id; - int pw_idx = power_well->desc->hsw.idx; - u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | - HSW_PWR_WELL_CTL_STATE(pw_idx); - u32 val; - - val = intel_de_read(dev_priv, regs->driver); - - /* - * On GEN9 big core due to a DMC bug the driver's request bits for PW1 - * and the MISC_IO PW will be not restored, so check instead for the - * BIOS's own request bits, which are forced-on for these power wells - * when exiting DC5/6. 
- */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && - (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) - val |= intel_de_read(dev_priv, regs->bios); - - return (val & mask) == mask; -} - -static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) -{ - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), - "DC9 already programmed to be enabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled to enable DC9.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & - HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), - "Power well 2 on.\n"); - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); - - /* - * TODO: check for the following to verify the conditions to enter DC9 - * state are satisfied: - * 1] Check relevant display engine registers to verify if mode set - * disable sequence was followed. - * 2] Check if display uninitialize sequence is initialized. - */ -} - -static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) -{ - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled.\n"); - - /* - * TODO: check for the following to verify DC9 state was indeed - * entered before programming to disable it: - * 1] Check relevant display engine registers to verify if mode - * set disable sequence was followed. - * 2] Check if display uninitialize sequence is initialized. - */ -} - -static void gen9_write_dc_state(struct drm_i915_private *dev_priv, - u32 state) -{ - int rewrites = 0; - int rereads = 0; - u32 v; - - intel_de_write(dev_priv, DC_STATE_EN, state); - - /* It has been observed that disabling the dc6 state sometimes - * doesn't stick and dmc keeps returning old value. Make sure - * the write really sticks enough times and also force rewrite until - * we are confident that state is exactly what we want. 
- */ - do { - v = intel_de_read(dev_priv, DC_STATE_EN); - - if (v != state) { - intel_de_write(dev_priv, DC_STATE_EN, state); - rewrites++; - rereads = 0; - } else if (rereads++ > 5) { - break; - } - - } while (rewrites < 100); - - if (v != state) - drm_err(&dev_priv->drm, - "Writing dc state to 0x%x failed, now 0x%x\n", - state, v); - - /* Most of the times we need one retry, avoid spam */ - if (rewrites > 1) - drm_dbg_kms(&dev_priv->drm, - "Rewrote dc state to 0x%x %d times\n", - state, rewrites); -} - -static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) -{ - u32 mask; - - mask = DC_STATE_EN_UPTO_DC5; - - if (DISPLAY_VER(dev_priv) >= 12) - mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 - | DC_STATE_EN_DC9; - else if (DISPLAY_VER(dev_priv) == 11) - mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - mask |= DC_STATE_EN_DC9; - else - mask |= DC_STATE_EN_UPTO_DC6; - - return mask; -} - -static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) -{ - u32 val; - - if (!HAS_DISPLAY(dev_priv)) - return; - - val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); - - drm_dbg_kms(&dev_priv->drm, - "Resetting DC state tracking from %02x to %02x\n", - dev_priv->dmc.dc_state, val); - dev_priv->dmc.dc_state = val; -} - -/** - * gen9_set_dc_state - set target display C power state - * @dev_priv: i915 device instance - * @state: target DC power state - * - DC_STATE_DISABLE - * - DC_STATE_EN_UPTO_DC5 - * - DC_STATE_EN_UPTO_DC6 - * - DC_STATE_EN_DC9 - * - * Signal to DMC firmware/HW the target DC power state passed in @state. - * DMC/HW can turn off individual display clocks and power rails when entering - * a deeper DC power state (higher in number) and turns these back when exiting - * that state to a shallower power state (lower in number). The HW will decide - * when to actually enter a given state on an on-demand basis, for instance - * depending on the active state of display pipes. The state of display - * registers backed by affected power rails are saved/restored as needed. - * - * Based on the above enabling a deeper DC power state is asynchronous wrt. - * enabling it. Disabling a deeper power state is synchronous: for instance - * setting %DC_STATE_DISABLE won't complete until all HW resources are turned - * back on and register state is restored. This is guaranteed by the MMIO write - * to DC_STATE_EN blocking until the state is restored. 
- */ -static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) -{ - u32 val; - u32 mask; - - if (!HAS_DISPLAY(dev_priv)) - return; - - if (drm_WARN_ON_ONCE(&dev_priv->drm, - state & ~dev_priv->dmc.allowed_dc_mask)) - state &= dev_priv->dmc.allowed_dc_mask; - - val = intel_de_read(dev_priv, DC_STATE_EN); - mask = gen9_dc_mask(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", - val & mask, state); - - /* Check if DMC is ignoring our DC state requests */ - if ((val & mask) != dev_priv->dmc.dc_state) - drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->dmc.dc_state, val & mask); - - val &= ~mask; - val |= state; - - gen9_write_dc_state(dev_priv, val); - - dev_priv->dmc.dc_state = val & mask; -} - static u32 sanitize_target_dc_state(struct drm_i915_private *dev_priv, u32 target_dc_state) @@ -858,53 +252,6 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv, return target_dc_state; } -static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) -{ - drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); -} - -static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) -{ - u32 val; - - drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); - val = intel_de_read(dev_priv, DC_STATE_EN); - val &= ~DC_STATE_DC3CO_STATUS; - intel_de_write(dev_priv, DC_STATE_EN, val); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - /* - * Delay of 200us DC3CO Exit time B.Spec 49196 - */ - usleep_range(200, 210); -} - -static void bxt_enable_dc9(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc9(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); - /* - * Power sequencer reset is not needed on - * platforms with South Display Engine on PCH, - * because PPS registers are always on. - */ - if (!HAS_PCH_SPLIT(dev_priv)) - intel_pps_reset_all(dev_priv); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); -} - -static void bxt_disable_dc9(struct drm_i915_private *dev_priv) -{ - assert_can_disable_dc9(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - intel_pps_unlock_regs_wa(dev_priv); -} - /** * intel_display_power_set_target_dc_state - Set target dc state. 
* @dev_priv: i915 device @@ -949,912 +296,8 @@ unlock: mutex_unlock(&power_domains->lock); } -static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) -{ - enum i915_power_well_id high_pg; - - /* Power wells at this level and above must be disabled for DC5 entry */ - if (DISPLAY_VER(dev_priv) == 12) - high_pg = ICL_DISP_PW_3; - else - high_pg = SKL_DISP_PW_2; - - drm_WARN_ONCE(&dev_priv->drm, - intel_display_power_well_is_enabled(dev_priv, high_pg), - "Power wells above platform's DC5 limit still enabled.\n"); - - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC5), - "DC5 already programmed to be enabled.\n"); - assert_rpm_wakelock_held(&dev_priv->runtime_pm); - - assert_dmc_loaded(dev_priv); -} - -static void gen9_enable_dc5(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc5(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); - - /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); - - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); -} - -static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) -{ - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, - "Backlight is not disabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC6), - "DC6 already programmed to be enabled.\n"); - - assert_dmc_loaded(dev_priv); -} - -static void skl_enable_dc6(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc6(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); - - /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); - - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); -} - -static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); - u32 bios_req = intel_de_read(dev_priv, regs->bios); - - /* Take over the request bit if set by BIOS. 
*/ - if (bios_req & mask) { - u32 drv_req = intel_de_read(dev_priv, regs->driver); - - if (!(drv_req & mask)) - intel_de_write(dev_priv, regs->driver, drv_req | mask); - intel_de_write(dev_priv, regs->bios, bios_req & ~mask); - } -} - -static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); -} - -static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); -} - -static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); -} - -static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) -{ - struct i915_power_well *power_well; - - power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); - if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); - - power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); - if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); - - if (IS_GEMINILAKE(dev_priv)) { - power_well = lookup_power_well(dev_priv, - GLK_DISP_PW_DPIO_CMN_C); - if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, - power_well->desc->bxt.phy); - } -} - -static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && - (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); -} - -static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) -{ - u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); - u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices; - - drm_WARN(&dev_priv->drm, - hw_enabled_dbuf_slices != enabled_dbuf_slices, - "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n", - hw_enabled_dbuf_slices, - enabled_dbuf_slices); -} - -static void gen9_disable_dc_states(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_config cdclk_config = {}; - - if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) { - tgl_disable_dc3co(dev_priv); - return; - } - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - if (!HAS_DISPLAY(dev_priv)) - return; - - intel_cdclk_get_cdclk(dev_priv, &cdclk_config); - /* Can't read out voltage_level so can't use intel_cdclk_changed() */ - drm_WARN_ON(&dev_priv->drm, - intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, - &cdclk_config)); - - gen9_assert_dbuf_enabled(dev_priv); - - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_verify_ddi_phy_power_wells(dev_priv); - - if (DISPLAY_VER(dev_priv) >= 11) - /* - * DMC retains HW context only for port A, the other combo - * PHY's HW context for port B is lost after DC transitions, - * so we need to restore it manually. 
- */ - intel_combo_phy_init(dev_priv); -} - -static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - gen9_disable_dc_states(dev_priv); -} - -static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if (!intel_dmc_has_payload(dev_priv)) - return; - - switch (dev_priv->dmc.target_dc_state) { - case DC_STATE_EN_DC3CO: - tgl_enable_dc3co(dev_priv); - break; - case DC_STATE_EN_UPTO_DC6: - skl_enable_dc6(dev_priv); - break; - case DC_STATE_EN_UPTO_DC5: - gen9_enable_dc5(dev_priv); - break; - } -} - -static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ -} - -static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ -} - -static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return true; -} - -static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_A); - if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_B); -} - -static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - i830_disable_pipe(dev_priv, PIPE_B); - i830_disable_pipe(dev_priv, PIPE_A); -} - -static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && - intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; -} - -static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if (intel_power_well_refcount(power_well) > 0) - i830_pipes_power_well_enable(dev_priv, power_well); - else - i830_pipes_power_well_disable(dev_priv, power_well); -} - -static void vlv_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) -{ - int pw_idx = power_well->desc->vlv.idx; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(pw_idx); - state = enable ? 
PUNIT_PWRGT_PWR_ON(pw_idx) : - PUNIT_PWRGT_PWR_GATE(pw_idx); - - vlv_punit_get(dev_priv); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); - ctrl &= ~mask; - ctrl |= state; - vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); - - if (wait_for(COND, 100)) - drm_err(&dev_priv->drm, - "timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); - -#undef COND - -out: - vlv_punit_put(dev_priv); -} - -static void vlv_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, true); -} - -static void vlv_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, false); -} - -static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - int pw_idx = power_well->desc->vlv.idx; - bool enabled = false; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(pw_idx); - ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); - - vlv_punit_get(dev_priv); - - state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. - */ - drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && - state != PUNIT_PWRGT_PWR_GATE(pw_idx)); - if (state == ctrl) - enabled = true; - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; - drm_WARN_ON(&dev_priv->drm, ctrl != state); - - vlv_punit_put(dev_priv); - - return enabled; -} - -static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) -{ - u32 val; - - /* - * On driver load, a pipe may be active and driving a DSI display. - * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck - * (and never recovering) in this case. intel_dsi_post_disable() will - * clear it when we turn off the display. - */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); - val &= DPOUNIT_CLOCK_GATE_DISABLE; - val |= VRHUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); - - /* - * Disable trickle feed and enable pnd deadline calculation - */ - intel_de_write(dev_priv, MI_ARB_VLV, - MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); - intel_de_write(dev_priv, CBR1_VLV, 0); - - drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); - intel_de_write(dev_priv, RAWCLK_FREQ_VLV, - DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, - 1000)); -} - -static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) -{ - struct intel_encoder *encoder; - enum pipe pipe; - - /* - * Enable the CRI clock source so we can get at the - * display and the reference clock for VGA - * hotplug / manual detection. Supposedly DSI also - * needs the ref clock up and running. - * - * CHV DPLL B/C have some issues if VGA mode is enabled. 
- */ - for_each_pipe(dev_priv, pipe) { - u32 val = intel_de_read(dev_priv, DPLL(pipe)); - - val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; - if (pipe != PIPE_A) - val |= DPLL_INTEGRATED_CRI_CLK_VLV; - - intel_de_write(dev_priv, DPLL(pipe), val); - } - - vlv_init_display_clock_gating(dev_priv); - - spin_lock_irq(&dev_priv->irq_lock); - valleyview_enable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - /* - * During driver initialization/resume we can avoid restoring the - * part of the HW/SW state that will be inited anyway explicitly. - */ - if (dev_priv->power_domains.initializing) - return; - - intel_hpd_init(dev_priv); - intel_hpd_poll_disable(dev_priv); - - /* Re-enable the ADPA, if we have one */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - if (encoder->type == INTEL_OUTPUT_ANALOG) - intel_crt_reset(&encoder->base); - } - - intel_vga_redisable_power_on(dev_priv); - - intel_pps_unlock_regs_wa(dev_priv); -} - -static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) -{ - spin_lock_irq(&dev_priv->irq_lock); - valleyview_disable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - /* make sure we're done processing display irqs */ - intel_synchronize_irq(dev_priv); - - intel_pps_reset_all(dev_priv); - - /* Prevent us from re-enabling polling on accident in late suspend */ - if (!dev_priv->drm.dev->power.is_suspended) - intel_hpd_poll_enable(dev_priv); -} - -static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, true); - - vlv_display_power_well_init(dev_priv); -} - -static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_display_power_well_deinit(dev_priv); - - vlv_set_power_well(dev_priv, power_well, false); -} - -static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - /* since ref/cri clock was enabled */ - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - - vlv_set_power_well(dev_priv, power_well, true); - - /* - * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - - * 6. De-assert cmn_reset/side_reset. Same as VLV X0. - * a. GUnit 0x2110 bit[0] set to 1 (def 0) - * b. The other bits such as sfr settings / modesel may all - * be set to 0. - * - * This should only be done on init and resume from S3 with - * both PLLs disabled, or we risk losing DPIO and PLL - * synchronization. 
- */ - intel_de_write(dev_priv, DPIO_CTL, - intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); -} - -static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) - assert_pll_disabled(dev_priv, pipe); - - /* Assert common reset */ - intel_de_write(dev_priv, DPIO_CTL, - intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); - - vlv_set_power_well(dev_priv, power_well, false); -} - #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) -#define BITS_SET(val, bits) (((val) & (bits)) == (bits)) - -static void assert_chv_phy_status(struct drm_i915_private *dev_priv) -{ - struct i915_power_well *cmn_bc = - lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); - struct i915_power_well *cmn_d = - lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); - u32 phy_control = dev_priv->chv_phy_control; - u32 phy_status = 0; - u32 phy_status_mask = 0xffffffff; - - /* - * The BIOS can leave the PHY is some weird state - * where it doesn't fully power down some parts. - * Disable the asserts until the PHY has been fully - * reset (ie. the power well has been disabled at - * least once). - */ - if (!dev_priv->chv_phy_assert[DPIO_PHY0]) - phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | - PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); - - if (!dev_priv->chv_phy_assert[DPIO_PHY1]) - phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); - - if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { - phy_status |= PHY_POWERGOOD(DPIO_PHY0); - - /* this assumes override is only used to enable lanes */ - if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) - phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); - - if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) - phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); - - /* CL1 is on whenever anything is on in either channel */ - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) - phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); - - /* - * The DPLLB check accounts for the pipe B + port A usage - * with CL2 powered up but all the lanes in the second channel - * powered down. 
- */ - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && - (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) - phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); - } - - if (intel_power_well_is_enabled(dev_priv, cmn_d)) { - phy_status |= PHY_POWERGOOD(DPIO_PHY1); - - /* this assumes override is only used to enable lanes */ - if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) - phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) - phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); - } - - phy_status &= phy_status_mask; - - /* - * The PHY may be busy with some initial calibration and whatnot, - * so the power state can take a while to actually change. - */ - if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, - phy_status_mask, phy_status, 10)) - drm_err(&dev_priv->drm, - "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, dev_priv->chv_phy_control); -} - -#undef BITS_SET - -static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum dpio_phy phy; - enum pipe pipe; - u32 tmp; - - drm_WARN_ON_ONCE(&dev_priv->drm, - power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { - pipe = PIPE_A; - phy = DPIO_PHY0; - } else { - pipe = PIPE_C; - phy = DPIO_PHY1; - } - - /* since ref/cri clock was enabled */ - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - vlv_set_power_well(dev_priv, power_well, true); - - /* Poll for phypwrgood signal */ - if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, - PHY_POWERGOOD(phy), 1)) - drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", - phy); - - vlv_dpio_get(dev_priv); - - /* Enable dynamic power down */ - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); - tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | - DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); - - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { - tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); - tmp |= DPIO_DYNPWRDOWNEN_CH1; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); - } else { - /* - * Force the non-existing CL2 off. BXT does this - * too, so maybe it saves some power even though - * CL2 doesn't exist? 
- */ - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); - tmp |= DPIO_CL2_LDOFUSE_PWRENB; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); - } - - vlv_dpio_put(dev_priv); - - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); - - drm_dbg_kms(&dev_priv->drm, - "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); -} - -static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum dpio_phy phy; - - drm_WARN_ON_ONCE(&dev_priv->drm, - power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { - phy = DPIO_PHY0; - assert_pll_disabled(dev_priv, PIPE_A); - assert_pll_disabled(dev_priv, PIPE_B); - } else { - phy = DPIO_PHY1; - assert_pll_disabled(dev_priv, PIPE_C); - } - - dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); - - vlv_set_power_well(dev_priv, power_well, false); - - drm_dbg_kms(&dev_priv->drm, - "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); - - /* PHY is fully reset now, so we can enable the PHY state asserts */ - dev_priv->chv_phy_assert[phy] = true; - - assert_chv_phy_status(dev_priv); -} - -static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, - enum dpio_channel ch, bool override, unsigned int mask) -{ - enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; - u32 reg, val, expected, actual; - - /* - * The BIOS can leave the PHY is some weird state - * where it doesn't fully power down some parts. - * Disable the asserts until the PHY has been fully - * reset (ie. the power well has been disabled at - * least once). - */ - if (!dev_priv->chv_phy_assert[phy]) - return; - - if (ch == DPIO_CH0) - reg = _CHV_CMN_DW0_CH0; - else - reg = _CHV_CMN_DW6_CH1; - - vlv_dpio_get(dev_priv); - val = vlv_dpio_read(dev_priv, pipe, reg); - vlv_dpio_put(dev_priv); - - /* - * This assumes !override is only used when the port is disabled. - * All lanes should power down even without the override when - * the port is disabled. - */ - if (!override || mask == 0xf) { - expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; - /* - * If CH1 common lane is not active anymore - * (eg. for pipe B DPLL) the entire channel will - * shut down, which causes the common lane registers - * to read as 0. That means we can't actually check - * the lane power down status bits, but as the entire - * register reads as 0 it's a good indication that the - * channel is indeed entirely powered down. - */ - if (ch == DPIO_CH1 && val == 0) - expected = 0; - } else if (mask != 0x0) { - expected = DPIO_ANYDL_POWERDOWN; - } else { - expected = 0; - } - - if (ch == DPIO_CH0) - actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; - else - actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; - actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; - - drm_WARN(&dev_priv->drm, actual != expected, - "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. 
(0x%x = 0x%08x)\n", - !!(actual & DPIO_ALLDL_POWERDOWN), - !!(actual & DPIO_ANYDL_POWERDOWN), - !!(expected & DPIO_ALLDL_POWERDOWN), - !!(expected & DPIO_ANYDL_POWERDOWN), - reg, val); -} - -bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, - enum dpio_channel ch, bool override) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - bool was_override; - - mutex_lock(&power_domains->lock); - - was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - if (override == was_override) - goto out; - - if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); - - drm_dbg_kms(&dev_priv->drm, - "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); - -out: - mutex_unlock(&power_domains->lock); - - return was_override; -} - -void chv_phy_powergate_lanes(struct intel_encoder *encoder, - bool override, unsigned int mask) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); - enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); - - mutex_lock(&power_domains->lock); - - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); - - if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); - - drm_dbg_kms(&dev_priv->drm, - "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); - - assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); - - mutex_unlock(&power_domains->lock); -} - -static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum pipe pipe = PIPE_A; - bool enabled; - u32 state, ctrl; - - vlv_punit_get(dev_priv); - - state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. - */ - drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && - state != DP_SSS_PWR_GATE(pipe)); - enabled = state == DP_SSS_PWR_ON(pipe); - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); - drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); - - vlv_punit_put(dev_priv); - - return enabled; -} - -static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - bool enable) -{ - enum pipe pipe = PIPE_A; - u32 state; - u32 ctrl; - - state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); - - vlv_punit_get(dev_priv); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); - ctrl &= ~DP_SSC_MASK(pipe); - ctrl |= enable ? 
DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); - vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); - - if (wait_for(COND, 100)) - drm_err(&dev_priv->drm, - "timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); - -#undef COND - -out: - vlv_punit_put(dev_priv); -} - -static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); -} - -static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - chv_set_pipe_power_well(dev_priv, power_well, true); - - vlv_display_power_well_init(dev_priv); -} - -static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_display_power_well_deinit(dev_priv); - - chv_set_pipe_power_well(dev_priv, power_well, false); -} - static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) { return power_domains->async_put_domains[0] | @@ -3034,27 +1477,6 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, #define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) #define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) -static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = i9xx_always_on_power_well_noop, - .disable = i9xx_always_on_power_well_noop, - .is_enabled = i9xx_always_on_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_pipe_power_well_ops = { - .sync_hw = chv_pipe_power_well_sync_hw, - .enable = chv_pipe_power_well_enable, - .disable = chv_pipe_power_well_disable, - .is_enabled = chv_pipe_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = chv_dpio_cmn_power_well_enable, - .disable = chv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - static const struct i915_power_well_desc i9xx_always_on_power_well[] = { { .name = "always-on", @@ -3065,13 +1487,6 @@ static const struct i915_power_well_desc i9xx_always_on_power_well[] = { }, }; -static const struct i915_power_well_ops i830_pipes_power_well_ops = { - .sync_hw = i830_pipes_power_well_sync_hw, - .enable = i830_pipes_power_well_enable, - .disable = i830_pipes_power_well_disable, - .is_enabled = i830_pipes_power_well_enabled, -}; - static const struct i915_power_well_desc i830_power_wells[] = { { .name = "always-on", @@ -3088,35 +1503,6 @@ static const struct i915_power_well_desc i830_power_wells[] = { }, }; -static const struct i915_power_well_regs hsw_power_well_regs = { - .bios = HSW_PWR_WELL_CTL1, - .driver = HSW_PWR_WELL_CTL2, - .kvmr = HSW_PWR_WELL_CTL3, - .debug = HSW_PWR_WELL_CTL4, -}; - -static const struct i915_power_well_ops hsw_power_well_ops = { - .regs = &hsw_power_well_regs, - .sync_hw = hsw_power_well_sync_hw, - .enable = hsw_power_well_enable, - .disable = hsw_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = gen9_dc_off_power_well_enable, - .disable = gen9_dc_off_power_well_disable, - .is_enabled = gen9_dc_off_power_well_enabled, -}; - -static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = 
bxt_dpio_cmn_power_well_enable, - .disable = bxt_dpio_cmn_power_well_disable, - .is_enabled = bxt_dpio_cmn_power_well_enabled, -}; - static const struct i915_power_well_desc hsw_power_wells[] = { { .name = "always-on", @@ -3158,27 +1544,6 @@ static const struct i915_power_well_desc bdw_power_wells[] = { }, }; -static const struct i915_power_well_ops vlv_display_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_display_power_well_enable, - .disable = vlv_display_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_dpio_cmn_power_well_enable, - .disable = vlv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_power_well_enable, - .disable = vlv_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - static const struct i915_power_well_desc vlv_power_wells[] = { { .name = "always-on", @@ -3560,34 +1925,6 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }; -static const struct i915_power_well_regs icl_aux_power_well_regs = { - .bios = ICL_PWR_WELL_CTL_AUX1, - .driver = ICL_PWR_WELL_CTL_AUX2, - .debug = ICL_PWR_WELL_CTL_AUX4, -}; - -static const struct i915_power_well_ops icl_aux_power_well_ops = { - .regs = &icl_aux_power_well_regs, - .sync_hw = hsw_power_well_sync_hw, - .enable = icl_aux_power_well_enable, - .disable = icl_aux_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_regs icl_ddi_power_well_regs = { - .bios = ICL_PWR_WELL_CTL_DDI1, - .driver = ICL_PWR_WELL_CTL_DDI2, - .debug = ICL_PWR_WELL_CTL_DDI4, -}; - -static const struct i915_power_well_ops icl_ddi_power_well_ops = { - .regs = &icl_ddi_power_well_regs, - .sync_hw = hsw_power_well_sync_hw, - .enable = hsw_power_well_enable, - .disable = hsw_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - static const struct i915_power_well_desc icl_power_wells[] = { { .name = "always-on", @@ -3801,90 +2138,6 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; -static void -tgl_tc_cold_request(struct drm_i915_private *i915, bool block) -{ - u8 tries = 0; - int ret; - - while (1) { - u32 low_val; - u32 high_val = 0; - - if (block) - low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; - else - low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; - - /* - * Spec states that we should timeout the request after 200us - * but the function below will timeout after 500us - */ - ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); - if (ret == 0) { - if (block && - (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) - ret = -EIO; - else - break; - } - - if (++tries == 3) - break; - - msleep(1); - } - - if (ret) - drm_err(&i915->drm, "TC cold %sblock failed\n", - block ? "" : "un"); - else - drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", - block ? 
"" : "un"); -} - -static void -tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, - struct i915_power_well *power_well) -{ - tgl_tc_cold_request(i915, true); -} - -static void -tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, - struct i915_power_well *power_well) -{ - tgl_tc_cold_request(i915, false); -} - -static void -tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, - struct i915_power_well *power_well) -{ - if (intel_power_well_refcount(power_well) > 0) - tgl_tc_cold_off_power_well_enable(i915, power_well); - else - tgl_tc_cold_off_power_well_disable(i915, power_well); -} - -static bool -tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - /* - * Not the correctly implementation but there is no way to just read it - * from PCODE, so returning count to avoid state mismatch errors - */ - return intel_power_well_refcount(power_well); -} - -static const struct i915_power_well_ops tgl_tc_cold_off_ops = { - .sync_hw = tgl_tc_cold_off_power_well_sync_hw, - .enable = tgl_tc_cold_off_power_well_enable, - .disable = tgl_tc_cold_off_power_well_disable, - .is_enabled = tgl_tc_cold_off_power_well_is_enabled, -}; - static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "always-on", diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index ced384b0a165..95b939149910 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -305,9 +305,4 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) -void chv_phy_powergate_lanes(struct intel_encoder *encoder, - bool override, unsigned int mask); -bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, - enum dpio_channel ch, bool override); - #endif /* __INTEL_DISPLAY_POWER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 2a0fb9d9c60f..b49f0de30296 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -4,7 +4,59 @@ */ #include "i915_drv.h" +#include "i915_irq.h" +#include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" +#include "intel_crt.h" +#include "intel_de.h" #include "intel_display_power_well.h" +#include "intel_display_types.h" +#include "intel_dpio_phy.h" +#include "intel_dpll.h" +#include "intel_dmc.h" +#include "intel_hotplug.h" +#include "intel_pcode.h" +#include "intel_pm.h" +#include "intel_pps.h" +#include "intel_vga.h" +#include "intel_tc.h" +#include "vlv_sideband.h" +#include "vlv_sideband_reg.h" + +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + +struct i915_power_well_ops { + const struct i915_power_well_regs *regs; + /* + * Synchronize the well's hw state to match the current sw state, for + * example enable/disable it based on the current refcount. Called + * during driver init and resume time, possibly after first calling + * the enable/disable handlers. + */ + void (*sync_hw)(struct drm_i915_private *i915, + struct i915_power_well *power_well); + /* + * Enable the well and resources that depend on it (for example + * interrupts located on the well). 
Called after the 0->1 refcount + * transition. + */ + void (*enable)(struct drm_i915_private *i915, + struct i915_power_well *power_well); + /* + * Disable the well and resources that depend on it. Called after + * the 1->0 refcount transition. + */ + void (*disable)(struct drm_i915_private *i915, + struct i915_power_well *power_well); + /* Returns the hw enabled state. */ + bool (*is_enabled)(struct drm_i915_private *i915, + struct i915_power_well *power_well); +}; struct i915_power_well * lookup_power_well(struct drm_i915_private *i915, @@ -111,3 +163,1757 @@ int intel_power_well_refcount(struct i915_power_well *power_well) { return power_well->count; } + +/* + * Starting with Haswell, we have a "Power Down Well" that can be turned off + * when not needed anymore. We have 4 registers that can request the power well + * to be enabled, and it will only be disabled if none of the registers is + * requesting it to be enabled. + */ +static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, + u8 irq_pipe_mask, bool has_vga) +{ + if (has_vga) + intel_vga_reset_io_mem(dev_priv); + + if (irq_pipe_mask) + gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); +} + +static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, + u8 irq_pipe_mask) +{ + if (irq_pipe_mask) + gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); +} + +#define ICL_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) + +#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) + +static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->hsw.idx; + + return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); +} + +static struct intel_digital_port * +aux_ch_to_digital_port(struct drm_i915_private *dev_priv, + enum aux_ch aux_ch) +{ + struct intel_digital_port *dig_port = NULL; + struct intel_encoder *encoder; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + /* We'll check the MST primary port */ + if (encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + dig_port = enc_to_dig_port(encoder); + if (!dig_port) + continue; + + if (dig_port->aux_ch != aux_ch) { + dig_port = NULL; + continue; + } + + break; + } + + return dig_port; +} + +static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, + const struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); + + return intel_port_to_phy(i915, dig_port->base.port); +} + +static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + bool timeout_expected) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = power_well->desc->hsw.idx; + int enable_delay = power_well->desc->hsw.fixed_enable_delay; + + /* + * For some power wells we're not supposed to watch the status bit for + * an ack, but rather just wait a fixed amount of time and then + * proceed. This is only used on DG2. + */ + if (IS_DG2(dev_priv) && enable_delay) { + usleep_range(enable_delay, 2 * enable_delay); + return; + } + + /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. 
*/ + if (intel_de_wait_for_set(dev_priv, regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { + drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", + intel_power_well_name(power_well)); + + drm_WARN_ON(&dev_priv->drm, !timeout_expected); + + } +} + +static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, + const struct i915_power_well_regs *regs, + int pw_idx) +{ + u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 ret; + + ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; + ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; + if (regs->kvmr.reg) + ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; + ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; + + return ret; +} + +static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = power_well->desc->hsw.idx; + bool disabled; + u32 reqs; + + /* + * Bspec doesn't require waiting for PWs to get disabled, but still do + * this for paranoia. The known cases where a PW will be forced on: + * - a KVMR request on any power well via the KVMR request register + * - a DMC request on PW1 and MISC_IO power wells via the BIOS and + * DEBUG request registers + * Skip the wait in case any of the request bits are set and print a + * diagnostic message. + */ + wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & + HSW_PWR_WELL_CTL_STATE(pw_idx))) || + (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); + if (disabled) + return; + + drm_dbg_kms(&dev_priv->drm, + "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", + intel_power_well_name(power_well), + !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); +} + +static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, + enum skl_power_gate pg) +{ + /* Timeout 5us for PG#0, for other PGs 1us */ + drm_WARN_ON(&dev_priv->drm, + intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, + SKL_FUSE_PG_DIST_STATUS(pg), 1)); +} + +static void hsw_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = power_well->desc->hsw.idx; + u32 val; + + if (power_well->desc->hsw.has_fuses) { + enum skl_power_gate pg; + + pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); + + /* Wa_16013190616:adlp */ + if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); + + /* + * For PW1 we have to wait both for the PW0/PG0 fuse state + * before enabling the power well and PW1/PG1's own fuse + * state after the enabling. For all other power wells with + * fuses we only have to wait for that PW/PG's fuse state + * after the enabling. + */ + if (pg == SKL_PG1) + gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); + } + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + + hsw_wait_for_power_well_enable(dev_priv, power_well, false); + + if (power_well->desc->hsw.has_fuses) { + enum skl_power_gate pg; + + pg = DISPLAY_VER(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); + gen9_wait_for_power_well_fuses(dev_priv, pg); + } + + hsw_power_well_post_enable(dev_priv, + power_well->desc->hsw.irq_pipe_mask, + power_well->desc->hsw.has_vga); +} + +static void hsw_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = power_well->desc->hsw.idx; + u32 val; + + hsw_power_well_pre_disable(dev_priv, + power_well->desc->hsw.irq_pipe_mask); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + +static void +icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = power_well->desc->hsw.idx; + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + u32 val; + + drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + + if (DISPLAY_VER(dev_priv) < 12) { + val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); + intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), + val | ICL_LANE_ENABLE_AUX); + } + + hsw_wait_for_power_well_enable(dev_priv, power_well, false); + + /* Display WA #1178: icl */ + if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && + !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { + val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); + val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; + intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); + } +} + +static void +icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = power_well->desc->hsw.idx; + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + u32 val; + + drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); + + val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); + intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), + val & ~ICL_LANE_ENABLE_AUX); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + struct intel_digital_port *dig_port) +{ + if (drm_WARN_ON(&dev_priv->drm, !dig_port)) + return; + + if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) + return; + + drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); +} + +#else + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + struct intel_digital_port *dig_port) +{ +} + +#endif + +#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) + +static void icl_tc_cold_exit(struct drm_i915_private *i915) +{ + int ret, tries = 0; + + while (1) { + ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, + 250, 1); + if (ret != -EAGAIN || ++tries == 3) + break; + msleep(1); + } + + /* Spec states that TC cold exit can take up to 1ms to complete */ + if (!ret) + 
msleep(1); + + /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ + drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : + "succeeded"); +} + +static void +icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + bool is_tbt = power_well->desc->hsw.is_tc_tbt; + bool timeout_expected; + u32 val; + + icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); + + val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); + val &= ~DP_AUX_CH_CTL_TBT_IO; + if (is_tbt) + val |= DP_AUX_CH_CTL_TBT_IO; + intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)); + + /* + * An AUX timeout is expected if the TBT DP tunnel is down, + * or need to enable AUX on a legacy TypeC port as part of the TC-cold + * exit sequence. + */ + timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); + if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) + icl_tc_cold_exit(dev_priv); + + hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); + + if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { + enum tc_port tc_port; + + tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x2)); + + if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), + DKL_CMN_UC_DW27_UC_HEALTH, 1)) + drm_warn(&dev_priv->drm, + "Timeout waiting TC uC health\n"); + } +} + +static void +icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); + + icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); + + hsw_power_well_disable(dev_priv, power_well); +} + +static void +icl_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + + if (intel_phy_is_tc(dev_priv, phy)) + return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); + else if (IS_ICELAKE(dev_priv)) + return icl_combo_phy_aux_power_well_enable(dev_priv, + power_well); + else + return hsw_power_well_enable(dev_priv, power_well); +} + +static void +icl_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + + if (intel_phy_is_tc(dev_priv, phy)) + return icl_tc_phy_aux_power_well_disable(dev_priv, power_well); + else if (IS_ICELAKE(dev_priv)) + return icl_combo_phy_aux_power_well_disable(dev_priv, + power_well); + else + return hsw_power_well_disable(dev_priv, power_well); +} + +/* + * We should only use the power well if we explicitly asked the hardware to + * enable it, so check if it's enabled and also check if we've requested it to + * be enabled. 
+ */ +static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + enum i915_power_well_id id = power_well->desc->id; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | + HSW_PWR_WELL_CTL_STATE(pw_idx); + u32 val; + + val = intel_de_read(dev_priv, regs->driver); + + /* + * On GEN9 big core due to a DMC bug the driver's request bits for PW1 + * and the MISC_IO PW will be not restored, so check instead for the + * BIOS's own request bits, which are forced-on for these power wells + * when exiting DC5/6. + */ + if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && + (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) + val |= intel_de_read(dev_priv, regs->bios); + + return (val & mask) == mask; +} + +static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) +{ + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), + "DC9 already programmed to be enabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled to enable DC9.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & + HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), + "Power well 2 on.\n"); + drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); + + /* + * TODO: check for the following to verify the conditions to enter DC9 + * state are satisfied: + * 1] Check relevant display engine registers to verify if mode set + * disable sequence was followed. + * 2] Check if display uninitialize sequence is initialized. + */ +} + +static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) +{ + drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled.\n"); + + /* + * TODO: check for the following to verify DC9 state was indeed + * entered before programming to disable it: + * 1] Check relevant display engine registers to verify if mode + * set disable sequence was followed. + * 2] Check if display uninitialize sequence is initialized. + */ +} + +static void gen9_write_dc_state(struct drm_i915_private *dev_priv, + u32 state) +{ + int rewrites = 0; + int rereads = 0; + u32 v; + + intel_de_write(dev_priv, DC_STATE_EN, state); + + /* It has been observed that disabling the dc6 state sometimes + * doesn't stick and dmc keeps returning old value. Make sure + * the write really sticks enough times and also force rewrite until + * we are confident that state is exactly what we want. 
+ */ + do { + v = intel_de_read(dev_priv, DC_STATE_EN); + + if (v != state) { + intel_de_write(dev_priv, DC_STATE_EN, state); + rewrites++; + rereads = 0; + } else if (rereads++ > 5) { + break; + } + + } while (rewrites < 100); + + if (v != state) + drm_err(&dev_priv->drm, + "Writing dc state to 0x%x failed, now 0x%x\n", + state, v); + + /* Most of the times we need one retry, avoid spam */ + if (rewrites > 1) + drm_dbg_kms(&dev_priv->drm, + "Rewrote dc state to 0x%x %d times\n", + state, rewrites); +} + +static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) +{ + u32 mask; + + mask = DC_STATE_EN_UPTO_DC5; + + if (DISPLAY_VER(dev_priv) >= 12) + mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 + | DC_STATE_EN_DC9; + else if (DISPLAY_VER(dev_priv) == 11) + mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; + else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + mask |= DC_STATE_EN_DC9; + else + mask |= DC_STATE_EN_UPTO_DC6; + + return mask; +} + +void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) +{ + u32 val; + + if (!HAS_DISPLAY(dev_priv)) + return; + + val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); + + drm_dbg_kms(&dev_priv->drm, + "Resetting DC state tracking from %02x to %02x\n", + dev_priv->dmc.dc_state, val); + dev_priv->dmc.dc_state = val; +} + +/** + * gen9_set_dc_state - set target display C power state + * @dev_priv: i915 device instance + * @state: target DC power state + * - DC_STATE_DISABLE + * - DC_STATE_EN_UPTO_DC5 + * - DC_STATE_EN_UPTO_DC6 + * - DC_STATE_EN_DC9 + * + * Signal to DMC firmware/HW the target DC power state passed in @state. + * DMC/HW can turn off individual display clocks and power rails when entering + * a deeper DC power state (higher in number) and turns these back when exiting + * that state to a shallower power state (lower in number). The HW will decide + * when to actually enter a given state on an on-demand basis, for instance + * depending on the active state of display pipes. The state of display + * registers backed by affected power rails are saved/restored as needed. + * + * Based on the above enabling a deeper DC power state is asynchronous wrt. + * enabling it. Disabling a deeper power state is synchronous: for instance + * setting %DC_STATE_DISABLE won't complete until all HW resources are turned + * back on and register state is restored. This is guaranteed by the MMIO write + * to DC_STATE_EN blocking until the state is restored. 
+ */ +void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) +{ + u32 val; + u32 mask; + + if (!HAS_DISPLAY(dev_priv)) + return; + + if (drm_WARN_ON_ONCE(&dev_priv->drm, + state & ~dev_priv->dmc.allowed_dc_mask)) + state &= dev_priv->dmc.allowed_dc_mask; + + val = intel_de_read(dev_priv, DC_STATE_EN); + mask = gen9_dc_mask(dev_priv); + drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", + val & mask, state); + + /* Check if DMC is ignoring our DC state requests */ + if ((val & mask) != dev_priv->dmc.dc_state) + drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", + dev_priv->dmc.dc_state, val & mask); + + val &= ~mask; + val |= state; + + gen9_write_dc_state(dev_priv, val); + + dev_priv->dmc.dc_state = val & mask; +} + +static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) +{ + drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); + gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); +} + +static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) +{ + u32 val; + + drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); + val = intel_de_read(dev_priv, DC_STATE_EN); + val &= ~DC_STATE_DC3CO_STATUS; + intel_de_write(dev_priv, DC_STATE_EN, val); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + /* + * Delay of 200us DC3CO Exit time B.Spec 49196 + */ + usleep_range(200, 210); +} + +static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) +{ + enum i915_power_well_id high_pg; + + /* Power wells at this level and above must be disabled for DC5 entry */ + if (DISPLAY_VER(dev_priv) == 12) + high_pg = ICL_DISP_PW_3; + else + high_pg = SKL_DISP_PW_2; + + drm_WARN_ONCE(&dev_priv->drm, + intel_display_power_well_is_enabled(dev_priv, high_pg), + "Power wells above platform's DC5 limit still enabled.\n"); + + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5), + "DC5 already programmed to be enabled.\n"); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); + + assert_dmc_loaded(dev_priv); +} + +void gen9_enable_dc5(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc5(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); + + /* Wa Display #1183: skl,kbl,cfl */ + if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) + intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); +} + +static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) +{ + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + "Backlight is not disabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC6), + "DC6 already programmed to be enabled.\n"); + + assert_dmc_loaded(dev_priv); +} + +void skl_enable_dc6(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc6(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); + + /* Wa Display #1183: skl,kbl,cfl */ + if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) + intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); +} + +void bxt_enable_dc9(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc9(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); + /* + * Power sequencer reset is not needed on + * platforms with South Display Engine on PCH, + * because PPS registers are always on. 
+ */ + if (!HAS_PCH_SPLIT(dev_priv)) + intel_pps_reset_all(dev_priv); + gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); +} + +void bxt_disable_dc9(struct drm_i915_private *dev_priv) +{ + assert_can_disable_dc9(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + intel_pps_unlock_regs_wa(dev_priv); +} + +static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 bios_req = intel_de_read(dev_priv, regs->bios); + + /* Take over the request bit if set by BIOS. */ + if (bios_req & mask) { + u32 drv_req = intel_de_read(dev_priv, regs->driver); + + if (!(drv_req & mask)) + intel_de_write(dev_priv, regs->driver, drv_req | mask); + intel_de_write(dev_priv, regs->bios, bios_req & ~mask); + } +} + +static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); +} + +static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); +} + +static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); +} + +static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *power_well; + + power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); + if (intel_power_well_refcount(power_well) > 0) + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); + + power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + if (intel_power_well_refcount(power_well) > 0) + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); + + if (IS_GEMINILAKE(dev_priv)) { + power_well = lookup_power_well(dev_priv, + GLK_DISP_PW_DPIO_CMN_C); + if (intel_power_well_refcount(power_well) > 0) + bxt_ddi_phy_verify_state(dev_priv, + power_well->desc->bxt.phy); + } +} + +static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && + (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); +} + +static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) +{ + u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); + u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices; + + drm_WARN(&dev_priv->drm, + hw_enabled_dbuf_slices != enabled_dbuf_slices, + "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n", + hw_enabled_dbuf_slices, + enabled_dbuf_slices); +} + +void gen9_disable_dc_states(struct drm_i915_private *dev_priv) +{ + struct intel_cdclk_config cdclk_config = {}; + + if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) { + tgl_disable_dc3co(dev_priv); + return; + } + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + if (!HAS_DISPLAY(dev_priv)) + return; + + intel_cdclk_get_cdclk(dev_priv, &cdclk_config); + /* Can't read out voltage_level so can't use intel_cdclk_changed() */ + drm_WARN_ON(&dev_priv->drm, + intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, + &cdclk_config)); + + gen9_assert_dbuf_enabled(dev_priv); + + if (IS_GEMINILAKE(dev_priv) 
|| IS_BROXTON(dev_priv)) + bxt_verify_ddi_phy_power_wells(dev_priv); + + if (DISPLAY_VER(dev_priv) >= 11) + /* + * DMC retains HW context only for port A, the other combo + * PHY's HW context for port B is lost after DC transitions, + * so we need to restore it manually. + */ + intel_combo_phy_init(dev_priv); +} + +static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + gen9_disable_dc_states(dev_priv); +} + +static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (!intel_dmc_has_payload(dev_priv)) + return; + + switch (dev_priv->dmc.target_dc_state) { + case DC_STATE_EN_DC3CO: + tgl_enable_dc3co(dev_priv); + break; + case DC_STATE_EN_UPTO_DC6: + skl_enable_dc6(dev_priv); + break; + case DC_STATE_EN_UPTO_DC5: + gen9_enable_dc5(dev_priv); + break; + } +} + +static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return true; +} + +static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) + i830_enable_pipe(dev_priv, PIPE_A); + if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) + i830_enable_pipe(dev_priv, PIPE_B); +} + +static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + i830_disable_pipe(dev_priv, PIPE_B); + i830_disable_pipe(dev_priv, PIPE_A); +} + +static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && + intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; +} + +static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (intel_power_well_refcount(power_well) > 0) + i830_pipes_power_well_enable(dev_priv, power_well); + else + i830_pipes_power_well_disable(dev_priv, power_well); +} + +static void vlv_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) +{ + int pw_idx = power_well->desc->vlv.idx; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(pw_idx); + state = enable ? 
PUNIT_PWRGT_PWR_ON(pw_idx) : + PUNIT_PWRGT_PWR_GATE(pw_idx); + + vlv_punit_get(dev_priv); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); + ctrl &= ~mask; + ctrl |= state; + vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); + + if (wait_for(COND, 100)) + drm_err(&dev_priv->drm, + "timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); + +#undef COND + +out: + vlv_punit_put(dev_priv); +} + +static void vlv_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); +} + +static void vlv_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, false); +} + +static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->vlv.idx; + bool enabled = false; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(pw_idx); + ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); + + vlv_punit_get(dev_priv); + + state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && + state != PUNIT_PWRGT_PWR_GATE(pw_idx)); + if (state == ctrl) + enabled = true; + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; + drm_WARN_ON(&dev_priv->drm, ctrl != state); + + vlv_punit_put(dev_priv); + + return enabled; +} + +static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) +{ + u32 val; + + /* + * On driver load, a pipe may be active and driving a DSI display. + * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck + * (and never recovering) in this case. intel_dsi_post_disable() will + * clear it when we turn off the display. + */ + val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val &= DPOUNIT_CLOCK_GATE_DISABLE; + val |= VRHUNIT_CLOCK_GATE_DISABLE; + intel_de_write(dev_priv, DSPCLK_GATE_D, val); + + /* + * Disable trickle feed and enable pnd deadline calculation + */ + intel_de_write(dev_priv, MI_ARB_VLV, + MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + intel_de_write(dev_priv, CBR1_VLV, 0); + + drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); + intel_de_write(dev_priv, RAWCLK_FREQ_VLV, + DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, + 1000)); +} + +static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + enum pipe pipe; + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection. Supposedly DSI also + * needs the ref clock up and running. + * + * CHV DPLL B/C have some issues if VGA mode is enabled. 
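A note on the wait_for(COND, 100) calls in vlv_set_power_well() above and chv_set_pipe_power_well() later in this file: they repeatedly evaluate the condition until it holds or roughly the given number of milliseconds has passed, and report a timeout otherwise. Below is a reduced, standalone sketch of that bounded-poll idiom (not the kernel's actual wait_for() implementation; that one also avoids pure busy-waiting):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the Punit status readback; pretends to become true after a few polls. */
static bool condition_met(void)
{
	static int polls;

	return ++polls > 3;
}

/* Poll cond() until it holds or timeout_ms elapses; returns true on success. */
static bool poll_timeout(bool (*cond)(void), unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= (long)timeout_ms)
			return cond();	/* one last look before giving up */
	}
}

int main(void)
{
	if (!poll_timeout(condition_met, 100))
		fprintf(stderr, "timeout setting power well state\n");
	return 0;
}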
+ */ + for_each_pipe(dev_priv, pipe) { + u32 val = intel_de_read(dev_priv, DPLL(pipe)); + + val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (pipe != PIPE_A) + val |= DPLL_INTEGRATED_CRI_CLK_VLV; + + intel_de_write(dev_priv, DPLL(pipe), val); + } + + vlv_init_display_clock_gating(dev_priv); + + spin_lock_irq(&dev_priv->irq_lock); + valleyview_enable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* + * During driver initialization/resume we can avoid restoring the + * part of the HW/SW state that will be inited anyway explicitly. + */ + if (dev_priv->power_domains.initializing) + return; + + intel_hpd_init(dev_priv); + intel_hpd_poll_disable(dev_priv); + + /* Re-enable the ADPA, if we have one */ + for_each_intel_encoder(&dev_priv->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_ANALOG) + intel_crt_reset(&encoder->base); + } + + intel_vga_redisable_power_on(dev_priv); + + intel_pps_unlock_regs_wa(dev_priv); +} + +static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + valleyview_disable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* make sure we're done processing display irqs */ + intel_synchronize_irq(dev_priv); + + intel_pps_reset_all(dev_priv); + + /* Prevent us from re-enabling polling on accident in late suspend */ + if (!dev_priv->drm.dev->power.is_suspended) + intel_hpd_poll_enable(dev_priv); +} + +static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); + + vlv_display_power_well_init(dev_priv); +} + +static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_display_power_well_deinit(dev_priv); + + vlv_set_power_well(dev_priv, power_well, false); +} + +static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + /* since ref/cri clock was enabled */ + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + + vlv_set_power_well(dev_priv, power_well, true); + + /* + * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - + * 6. De-assert cmn_reset/side_reset. Same as VLV X0. + * a. GUnit 0x2110 bit[0] set to 1 (def 0) + * b. The other bits such as sfr settings / modesel may all + * be set to 0. + * + * This should only be done on init and resume from S3 with + * both PLLs disabled, or we risk losing DPIO and PLL + * synchronization. 
+ */ + intel_de_write(dev_priv, DPIO_CTL, + intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); +} + +static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) + assert_pll_disabled(dev_priv, pipe); + + /* Assert common reset */ + intel_de_write(dev_priv, DPIO_CTL, + intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); + + vlv_set_power_well(dev_priv, power_well, false); +} + +#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) + +#define BITS_SET(val, bits) (((val) & (bits)) == (bits)) + +static void assert_chv_phy_status(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn_bc = + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + struct i915_power_well *cmn_d = + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); + u32 phy_control = dev_priv->chv_phy_control; + u32 phy_status = 0; + u32 phy_status_mask = 0xffffffff; + + /* + * The BIOS can leave the PHY is some weird state + * where it doesn't fully power down some parts. + * Disable the asserts until the PHY has been fully + * reset (ie. the power well has been disabled at + * least once). + */ + if (!dev_priv->chv_phy_assert[DPIO_PHY0]) + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | + PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); + + if (!dev_priv->chv_phy_assert[DPIO_PHY1]) + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); + + if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { + phy_status |= PHY_POWERGOOD(DPIO_PHY0); + + /* this assumes override is only used to enable lanes */ + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); + + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); + + /* CL1 is on whenever anything is on in either channel */ + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); + + /* + * The DPLLB check accounts for the pipe B + port A usage + * with CL2 powered up but all the lanes in the second channel + * powered down. 
+ */ + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && + (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); + } + + if (intel_power_well_is_enabled(dev_priv, cmn_d)) { + phy_status |= PHY_POWERGOOD(DPIO_PHY1); + + /* this assumes override is only used to enable lanes */ + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); + } + + phy_status &= phy_status_mask; + + /* + * The PHY may be busy with some initial calibration and whatnot, + * so the power state can take a while to actually change. + */ + if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, + phy_status_mask, phy_status, 10)) + drm_err(&dev_priv->drm, + "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", + intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, dev_priv->chv_phy_control); +} + +#undef BITS_SET + +static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + enum pipe pipe; + u32 tmp; + + drm_WARN_ON_ONCE(&dev_priv->drm, + power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + pipe = PIPE_A; + phy = DPIO_PHY0; + } else { + pipe = PIPE_C; + phy = DPIO_PHY1; + } + + /* since ref/cri clock was enabled */ + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + vlv_set_power_well(dev_priv, power_well, true); + + /* Poll for phypwrgood signal */ + if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, + PHY_POWERGOOD(phy), 1)) + drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", + phy); + + vlv_dpio_get(dev_priv); + + /* Enable dynamic power down */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); + tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | + DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); + + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); + tmp |= DPIO_DYNPWRDOWNEN_CH1; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); + } else { + /* + * Force the non-existing CL2 off. BXT does this + * too, so maybe it saves some power even though + * CL2 doesn't exist? 
+ */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); + tmp |= DPIO_CL2_LDOFUSE_PWRENB; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); + } + + vlv_dpio_put(dev_priv); + + dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + drm_dbg_kms(&dev_priv->drm, + "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); +} + +static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + + drm_WARN_ON_ONCE(&dev_priv->drm, + power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + phy = DPIO_PHY0; + assert_pll_disabled(dev_priv, PIPE_A); + assert_pll_disabled(dev_priv, PIPE_B); + } else { + phy = DPIO_PHY1; + assert_pll_disabled(dev_priv, PIPE_C); + } + + dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + vlv_set_power_well(dev_priv, power_well, false); + + drm_dbg_kms(&dev_priv->drm, + "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); + + /* PHY is fully reset now, so we can enable the PHY state asserts */ + dev_priv->chv_phy_assert[phy] = true; + + assert_chv_phy_status(dev_priv); +} + +static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override, unsigned int mask) +{ + enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; + u32 reg, val, expected, actual; + + /* + * The BIOS can leave the PHY is some weird state + * where it doesn't fully power down some parts. + * Disable the asserts until the PHY has been fully + * reset (ie. the power well has been disabled at + * least once). + */ + if (!dev_priv->chv_phy_assert[phy]) + return; + + if (ch == DPIO_CH0) + reg = _CHV_CMN_DW0_CH0; + else + reg = _CHV_CMN_DW6_CH1; + + vlv_dpio_get(dev_priv); + val = vlv_dpio_read(dev_priv, pipe, reg); + vlv_dpio_put(dev_priv); + + /* + * This assumes !override is only used when the port is disabled. + * All lanes should power down even without the override when + * the port is disabled. + */ + if (!override || mask == 0xf) { + expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; + /* + * If CH1 common lane is not active anymore + * (eg. for pipe B DPLL) the entire channel will + * shut down, which causes the common lane registers + * to read as 0. That means we can't actually check + * the lane power down status bits, but as the entire + * register reads as 0 it's a good indication that the + * channel is indeed entirely powered down. + */ + if (ch == DPIO_CH1 && val == 0) + expected = 0; + } else if (mask != 0x0) { + expected = DPIO_ANYDL_POWERDOWN; + } else { + expected = 0; + } + + if (ch == DPIO_CH0) + actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; + else + actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; + actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; + + drm_WARN(&dev_priv->drm, actual != expected, + "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. 
(0x%x = 0x%08x)\n", + !!(actual & DPIO_ALLDL_POWERDOWN), + !!(actual & DPIO_ANYDL_POWERDOWN), + !!(expected & DPIO_ALLDL_POWERDOWN), + !!(expected & DPIO_ANYDL_POWERDOWN), + reg, val); +} + +bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + bool was_override; + + mutex_lock(&power_domains->lock); + + was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + if (override == was_override) + goto out; + + if (override) + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + else + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + drm_dbg_kms(&dev_priv->drm, + "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", + phy, ch, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); + +out: + mutex_unlock(&power_domains->lock); + + return was_override; +} + +void chv_phy_powergate_lanes(struct intel_encoder *encoder, + bool override, unsigned int mask) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct i915_power_domains *power_domains = &dev_priv->power_domains; + enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); + enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); + + mutex_lock(&power_domains->lock); + + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); + + if (override) + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + else + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + drm_dbg_kms(&dev_priv->drm, + "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", + phy, ch, mask, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); + + assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); + + mutex_unlock(&power_domains->lock); +} + +static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe = PIPE_A; + bool enabled; + u32 state, ctrl; + + vlv_punit_get(dev_priv); + + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && + state != DP_SSS_PWR_GATE(pipe)); + enabled = state == DP_SSS_PWR_ON(pipe); + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); + drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); + + vlv_punit_put(dev_priv); + + return enabled; +} + +static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + bool enable) +{ + enum pipe pipe = PIPE_A; + u32 state; + u32 ctrl; + + state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); + + vlv_punit_get(dev_priv); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); + ctrl &= ~DP_SSC_MASK(pipe); + ctrl |= enable ? 
DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); + + if (wait_for(COND, 100)) + drm_err(&dev_priv->drm, + "timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); + +#undef COND + +out: + vlv_punit_put(dev_priv); +} + +static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); +} + +static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + chv_set_pipe_power_well(dev_priv, power_well, true); + + vlv_display_power_well_init(dev_priv); +} + +static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_display_power_well_deinit(dev_priv); + + chv_set_pipe_power_well(dev_priv, power_well, false); +} + +static void +tgl_tc_cold_request(struct drm_i915_private *i915, bool block) +{ + u8 tries = 0; + int ret; + + while (1) { + u32 low_val; + u32 high_val = 0; + + if (block) + low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; + else + low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; + + /* + * Spec states that we should timeout the request after 200us + * but the function below will timeout after 500us + */ + ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); + if (ret == 0) { + if (block && + (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) + ret = -EIO; + else + break; + } + + if (++tries == 3) + break; + + msleep(1); + } + + if (ret) + drm_err(&i915->drm, "TC cold %sblock failed\n", + block ? "" : "un"); + else + drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", + block ? "" : "un"); +} + +static void +tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + tgl_tc_cold_request(i915, true); +} + +static void +tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + tgl_tc_cold_request(i915, false); +} + +static void +tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + if (intel_power_well_refcount(power_well) > 0) + tgl_tc_cold_off_power_well_enable(i915, power_well); + else + tgl_tc_cold_off_power_well_disable(i915, power_well); +} + +static bool +tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + /* + * Not the correctly implementation but there is no way to just read it + * from PCODE, so returning count to avoid state mismatch errors + */ + return intel_power_well_refcount(power_well); +} + + +const struct i915_power_well_ops i9xx_always_on_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = i9xx_always_on_power_well_noop, + .disable = i9xx_always_on_power_well_noop, + .is_enabled = i9xx_always_on_power_well_enabled, +}; + +const struct i915_power_well_ops chv_pipe_power_well_ops = { + .sync_hw = chv_pipe_power_well_sync_hw, + .enable = chv_pipe_power_well_enable, + .disable = chv_pipe_power_well_disable, + .is_enabled = chv_pipe_power_well_enabled, +}; + +const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = chv_dpio_cmn_power_well_enable, + .disable = chv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +const struct i915_power_well_ops i830_pipes_power_well_ops = { + .sync_hw = 
i830_pipes_power_well_sync_hw, + .enable = i830_pipes_power_well_enable, + .disable = i830_pipes_power_well_disable, + .is_enabled = i830_pipes_power_well_enabled, +}; + +static const struct i915_power_well_regs hsw_power_well_regs = { + .bios = HSW_PWR_WELL_CTL1, + .driver = HSW_PWR_WELL_CTL2, + .kvmr = HSW_PWR_WELL_CTL3, + .debug = HSW_PWR_WELL_CTL4, +}; + +const struct i915_power_well_ops hsw_power_well_ops = { + .regs = &hsw_power_well_regs, + .sync_hw = hsw_power_well_sync_hw, + .enable = hsw_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +const struct i915_power_well_ops gen9_dc_off_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = gen9_dc_off_power_well_enable, + .disable = gen9_dc_off_power_well_disable, + .is_enabled = gen9_dc_off_power_well_enabled, +}; + +const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = bxt_dpio_cmn_power_well_enable, + .disable = bxt_dpio_cmn_power_well_disable, + .is_enabled = bxt_dpio_cmn_power_well_enabled, +}; + +const struct i915_power_well_ops vlv_display_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_display_power_well_enable, + .disable = vlv_display_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_dpio_cmn_power_well_enable, + .disable = vlv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +const struct i915_power_well_ops vlv_dpio_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_power_well_enable, + .disable = vlv_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_regs icl_aux_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_AUX1, + .driver = ICL_PWR_WELL_CTL_AUX2, + .debug = ICL_PWR_WELL_CTL_AUX4, +}; + +const struct i915_power_well_ops icl_aux_power_well_ops = { + .regs = &icl_aux_power_well_regs, + .sync_hw = hsw_power_well_sync_hw, + .enable = icl_aux_power_well_enable, + .disable = icl_aux_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static const struct i915_power_well_regs icl_ddi_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_DDI1, + .driver = ICL_PWR_WELL_CTL_DDI2, + .debug = ICL_PWR_WELL_CTL_DDI4, +}; + +const struct i915_power_well_ops icl_ddi_power_well_ops = { + .regs = &icl_ddi_power_well_regs, + .sync_hw = hsw_power_well_sync_hw, + .enable = hsw_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +const struct i915_power_well_ops tgl_tc_cold_off_ops = { + .sync_hw = tgl_tc_cold_off_power_well_sync_hw, + .enable = tgl_tc_cold_off_power_well_enable, + .disable = tgl_tc_cold_off_power_well_disable, + .is_enabled = tgl_tc_cold_off_power_well_is_enabled, +}; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index 9a3756fdcf7f..de3ee1bfb06d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -36,41 +36,6 @@ enum i915_power_well_id { TGL_DISP_PW_TC_COLD_OFF, }; -struct i915_power_well_regs { - i915_reg_t bios; - i915_reg_t driver; - i915_reg_t kvmr; - i915_reg_t debug; -}; - -struct i915_power_well_ops { - const struct i915_power_well_regs *regs; - /* - * Synchronize the well's hw state to match 
the current sw state, for - * example enable/disable it based on the current refcount. Called - * during driver init and resume time, possibly after first calling - * the enable/disable handlers. - */ - void (*sync_hw)(struct drm_i915_private *i915, - struct i915_power_well *power_well); - /* - * Enable the well and resources that depend on it (for example - * interrupts located on the well). Called after the 0->1 refcount - * transition. - */ - void (*enable)(struct drm_i915_private *i915, - struct i915_power_well *power_well); - /* - * Disable the well and resources that depend on it. Called after - * the 1->0 refcount transition. - */ - void (*disable)(struct drm_i915_private *i915, - struct i915_power_well *power_well); - /* Returns the hw enabled state. */ - bool (*is_enabled)(struct drm_i915_private *i915, - struct i915_power_well *power_well); -}; - struct i915_power_well_desc { const char *name; bool always_on; @@ -150,4 +115,31 @@ const char *intel_power_well_name(struct i915_power_well *power_well); u64 intel_power_well_domains(struct i915_power_well *power_well); int intel_power_well_refcount(struct i915_power_well *power_well); +void chv_phy_powergate_lanes(struct intel_encoder *encoder, + bool override, unsigned int mask); +bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override); + +void gen9_enable_dc5(struct drm_i915_private *dev_priv); +void skl_enable_dc6(struct drm_i915_private *dev_priv); +void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); +void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state); +void gen9_disable_dc_states(struct drm_i915_private *dev_priv); +void bxt_enable_dc9(struct drm_i915_private *dev_priv); +void bxt_disable_dc9(struct drm_i915_private *dev_priv); + +extern const struct i915_power_well_ops i9xx_always_on_power_well_ops; +extern const struct i915_power_well_ops chv_pipe_power_well_ops; +extern const struct i915_power_well_ops chv_dpio_cmn_power_well_ops; +extern const struct i915_power_well_ops i830_pipes_power_well_ops; +extern const struct i915_power_well_ops hsw_power_well_ops; +extern const struct i915_power_well_ops gen9_dc_off_power_well_ops; +extern const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops; +extern const struct i915_power_well_ops vlv_display_power_well_ops; +extern const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops; +extern const struct i915_power_well_ops vlv_dpio_power_well_ops; +extern const struct i915_power_well_ops icl_aux_power_well_ops; +extern const struct i915_power_well_ops icl_ddi_power_well_ops; +extern const struct i915_power_well_ops tgl_tc_cold_off_ops; + #endif diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 44edeb2e55c0..cc6abe761f5e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -24,6 +24,7 @@ #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" +#include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpio_phy.h" diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 64bd4ca0edd4..5a598dd06039 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -6,6 +6,7 @@ #include "g4x_dp.h" #include "i915_drv.h" #include "intel_de.h" +#include "intel_display_power_well.h" #include "intel_display_types.h" #include 
"intel_dp.h" #include "intel_dpll.h" From ac78f31b2f83637e24a044f62a60df7de1c0d7c9 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:41 +0300 Subject: [PATCH 116/219] drm/i915: Unexport the for_each_power_well() macros MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The for_each_power_well() macros are only used in intel_display_power.c and intel_display_power_well.c, so unexport them. Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-2-imre.deak@intel.com --- .../drm/i915/display/intel_display_power.c | 8 ++++++++ .../drm/i915/display/intel_display_power.h | 20 ------------------- .../i915/display/intel_display_power_well.h | 12 +++++++++++ 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index b3e8ede90039..f3c14e721abc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -21,6 +21,14 @@ #include "intel_snps_phy.h" #include "vlv_sideband.h" +#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ + for_each_power_well(__dev_priv, __power_well) \ + for_each_if((__power_well)->desc->domains & (__domain_mask)) + +#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ + for_each_power_well_reverse(__dev_priv, __power_well) \ + for_each_if((__power_well)->desc->domains & (__domain_mask)) + const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 95b939149910..e80317e7868b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -172,26 +172,6 @@ struct intel_display_power_domain_set { for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ for_each_if(BIT_ULL(domain) & (mask)) -#define for_each_power_well(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ - (__power_well) - (__dev_priv)->power_domains.power_wells < \ - (__dev_priv)->power_domains.power_well_count; \ - (__power_well)++) - -#define for_each_power_well_reverse(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ - (__dev_priv)->power_domains.power_well_count - 1; \ - (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ - (__power_well)--) - -#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ - for_each_power_well(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) - -#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ - for_each_power_well_reverse(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) - int intel_power_domains_init(struct drm_i915_private *dev_priv); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index de3ee1bfb06d..c4a8a3d728e0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h 
@@ -12,6 +12,18 @@ struct drm_i915_private; struct i915_power_well; +#define for_each_power_well(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ + (__power_well) - (__dev_priv)->power_domains.power_wells < \ + (__dev_priv)->power_domains.power_well_count; \ + (__power_well)++) + +#define for_each_power_well_reverse(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ + (__dev_priv)->power_domains.power_well_count - 1; \ + (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ + (__power_well)--) + /* * i915_power_well_id: * From 323286c81245b3ee31f495367e51d9ee8431bf13 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:42 +0300 Subject: [PATCH 117/219] drm/i915: Move the power domain->well mappings to intel_display_power_map.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the list of platform specific power domain -> power well definitions to intel_display_power_map.c. While at it group the platforms' power domain macros with the corresponding power well lists and keep all the power domain lists in the same order (matching the enum order). No functional changes. v2: - s/intel_display_power_internal.h/intel_display_power_map.h/ (Jani) - Simplify intel_cleanup_power_wells(). - Don't move intel_display_power_domain_str(). v3: - Rename intel_init/cleanup_power_wells() to intel_display_power_map_init/cleanup(). - Add documentation to intel_display_power_map_init/cleanup(). Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-3-imre.deak@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + .../drm/i915/display/intel_display_power.c | 2260 +---------------- .../i915/display/intel_display_power_map.c | 2150 ++++++++++++++++ .../i915/display/intel_display_power_map.h | 14 + 4 files changed, 2168 insertions(+), 2257 deletions(-) create mode 100644 drivers/gpu/drm/i915/display/intel_display_power_map.c create mode 100644 drivers/gpu/drm/i915/display/intel_display_power_map.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 815589f8658c..ed0b3e55a2e4 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -213,6 +213,7 @@ i915-y += \ display/intel_cursor.o \ display/intel_display.o \ display/intel_display_power.o \ + display/intel_display_power_map.o \ display/intel_display_power_well.o \ display/intel_dmc.o \ display/intel_dpio_phy.o \ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index f3c14e721abc..d2f2fb24ca42 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -11,6 +11,7 @@ #include "intel_combo_phy.h" #include "intel_de.h" #include "intel_display_power.h" +#include "intel_display_power_map.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dmc.h" @@ -848,2169 +849,6 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, } } -#define I830_PIPES_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DISPLAY_POWER_DOMAINS ( \ - 
BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define HSW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - 
BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define BDW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define 
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) -#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) -#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) -#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * ICL PW_0/PG_0 domains (HW/DMC control): - * - PCI - * - clocks except port PLL - * - central power except FBC - * - shared functions except pipe interrupts, pipe MBUS, DBUF registers - * ICL PW_1/PG_1 domains (HW/DMC control): - * - DBUF function - * - PIPE_A and its planes, except VGA - * - transcoder EDP + PSR - * - transcoder DSI - * - DDI_A - * - FBC - */ -#define ICL_PW_4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* VDSC/joining */ -#define ICL_PW_3_POWER_DOMAINS ( \ - ICL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_E) | \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* - * - transcoder WD - * - KVMR (HW control) - */ -#define 
ICL_PW_2_POWER_DOMAINS ( \ - ICL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* - * - KVMR (HW control) - */ -#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - ICL_PW_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_DC_OFF) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define ICL_DDI_IO_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) -#define ICL_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) -#define ICL_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) -#define ICL_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) -#define ICL_DDI_IO_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) -#define ICL_DDI_IO_F_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) - -#define ICL_AUX_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_A)) -#define ICL_AUX_B_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B)) -#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C)) -#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D)) -#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_E)) -#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F)) -#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) -#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) -#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) -#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) - -#define TGL_PW_5_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_PW_4_POWER_DOMAINS ( \ - TGL_PW_5_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_PW_3_POWER_DOMAINS ( \ - TGL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_PW_2_POWER_DOMAINS ( \ - TGL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - TGL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define 
TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) -#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) -#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) -#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) -#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) -#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) - -#define TGL_AUX_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_A)) -#define TGL_AUX_B_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B)) -#define TGL_AUX_C_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C)) - -#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) -#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) -#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) -#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) -#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) -#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) - -#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) -#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) -#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) -#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) -#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) -#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) - -#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ - BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) - -#define RKL_PW_4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define RKL_PW_3_POWER_DOMAINS ( \ - RKL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * There is no PW_2/PG_2 on RKL. 
- *
- * RKL PW_1/PG_1 domains (under HW/DMC control):
- * - DBUF function (note: registers are in PW0)
- * - PIPE_A and its planes and VDSC/joining, except VGA
- * - transcoder A
- * - DDI_A and DDI_B
- * - FBC
- *
- * RKL PW_0/PG_0 domains (under HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - shared functions:
- *   * interrupts except pipe interrupts
- *   * MBus except PIPE_MBUS_DBOX_CTL
- *   * DBUF registers
- * - central power except FBC
- * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
- */
-
-#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
-	RKL_PW_3_POWER_DOMAINS | \
-	BIT_ULL(POWER_DOMAIN_MODESET) | \
-	BIT_ULL(POWER_DOMAIN_AUX_A) | \
-	BIT_ULL(POWER_DOMAIN_AUX_B) | \
-	BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * DG1 onwards Audio MMIO/VERBS lies in PG0 power well.
- */
-#define DG1_PW_3_POWER_DOMAINS ( \
-	TGL_PW_4_POWER_DOMAINS | \
-	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
-	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
-	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
-	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
-	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
-	BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
-	BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
-	BIT_ULL(POWER_DOMAIN_VGA) | \
-	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
-	BIT_ULL(POWER_DOMAIN_INIT))
-
-#define DG1_PW_2_POWER_DOMAINS ( \
-	DG1_PW_3_POWER_DOMAINS | \
-	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
-	BIT_ULL(POWER_DOMAIN_INIT))
-
-#define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \
-	DG1_PW_3_POWER_DOMAINS | \
-	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
-	BIT_ULL(POWER_DOMAIN_MODESET) | \
-	BIT_ULL(POWER_DOMAIN_AUX_A) | \
-	BIT_ULL(POWER_DOMAIN_AUX_B) | \
-	BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * XE_LPD Power Domains
- *
- * Previous platforms required that PG(n-1) be enabled before PG(n). That
- * dependency chain turns into a dependency tree on XE_LPD:
- *
- *       PG0
- *        |
- *     --PG1--
- *    /       \
- *  PGA  --PG2--
- *      /   |   \
- *    PGB  PGC  PGD
- *
- * Power wells must be enabled from top to bottom and disabled from bottom
- * to top. This allows pipes to be power gated independently.
- */ - -#define XELPD_PW_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_2_POWER_DOMAINS ( \ - XELPD_PW_B_POWER_DOMAINS | \ - XELPD_PW_C_POWER_DOMAINS | \ - XELPD_PW_D_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * XELPD PW_1/PG_1 domains (under HW/DMC control): - * - DBUF function (registers are in PW0) - * - Transcoder A - * - DDI_A and DDI_B - * - * XELPD PW_0/PW_1 domains (under HW/DMC control): - * - PCI - * - Clocks except port PLL - * - Shared functions: - * * interrupts except pipe interrupts - * * MBus except PIPE_MBUS_DBOX_CTL - * * DBUF registers - * - Central power except FBC - * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) - */ - -#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - XELPD_PW_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) -#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) -#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) -#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) -#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) -#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) - -#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) -#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) -#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) -#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) - -#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) -#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) -#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) -#define XELPD_DDI_IO_TC2_POWER_DOMAINS 
BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) -#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) -#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) - -static const struct i915_power_well_desc i9xx_always_on_power_well[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, -}; - -static const struct i915_power_well_desc i830_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "pipes", - .domains = I830_PIPES_POWER_DOMAINS, - .ops = &i830_pipes_power_well_ops, - .id = DISP_PW_ID_NONE, - }, -}; - -static const struct i915_power_well_desc hsw_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = HSW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - .hsw.has_vga = true, - }, - }, -}; - -static const struct i915_power_well_desc bdw_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = BDW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - }, - }, -}; - -static const struct i915_power_well_desc vlv_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = VLV_DISPLAY_POWER_DOMAINS, - .ops = &vlv_display_power_well_ops, - .id = VLV_DISP_PW_DISP2D, - { - .vlv.idx = PUNIT_PWGT_IDX_DISP2D, - }, - }, - { - .name = "dpio-tx-b-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, - }, - }, - { - .name = "dpio-tx-b-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, - }, - }, - { - .name = "dpio-tx-c-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, - }, - }, - { - .name = "dpio-tx-c-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, - }, - }, - { - .name = "dpio-common", - .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &vlv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - 
}, - }, -}; - -static const struct i915_power_well_desc chv_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - /* - * Pipe A power well is the new disp2d well. Pipe B and C - * power wells don't actually exist. Pipe A power well is - * required for any pipe to work. - */ - .domains = CHV_DISPLAY_POWER_DOMAINS, - .ops = &chv_pipe_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "dpio-common-bc", - .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &chv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - }, - }, - { - .name = "dpio-common-d", - .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, - .ops = &chv_dpio_cmn_power_well_ops, - .id = CHV_DISP_PW_DPIO_CMN_D, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, - }, - }, -}; - -static const struct i915_power_well_desc skl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "MISC IO power well", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_MISC_IO, - { - .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, - }, - }, - { - .name = "DC off", - .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A/E IO power well", - .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, - }, - }, - { - .name = "DDI B IO power well", - .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO power well", - .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO power well", - .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_D, - }, - }, -}; - -static const struct i915_power_well_desc bxt_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - 
.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "dpio-common-a", - .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, - }, - { - .name = "dpio-common-bc", - .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, - }, -}; - -static const struct i915_power_well_desc glk_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "dpio-common-a", - .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, - }, - { - .name = "dpio-common-b", - .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, - }, - { - .name = "dpio-common-c", - .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = GLK_DISP_PW_DPIO_CMN_C, - { - .bxt.phy = DPIO_PHY2, - }, - }, - { - .name = "AUX A", - .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_C, - }, - }, - { - .name = "DDI A IO power well", - .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO power well", - .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO power well", - .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, -}; - -static const struct i915_power_well_desc icl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = 
SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = ICL_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = ICL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO", - .domains = ICL_DDI_IO_D_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_D, - }, - }, - { - .name = "DDI E IO", - .domains = ICL_DDI_IO_E_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_E, - }, - }, - { - .name = "DDI F IO", - .domains = ICL_DDI_IO_F_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_F, - }, - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C TC1", - .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX D TC2", - .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_D, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX E TC3", - .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_E, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX F TC4", - .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_F, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX C TBT1", - .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX D TBT2", - .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX E TBT3", - .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX F TBT4", - .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "power well 4", 
- .domains = ICL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - }, - }, -}; - -static const struct i915_power_well_desc tgl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = TGL_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = TGL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - } - }, - { - .name = "DDI IO TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, - { - .name = "DDI IO TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, - { - .name = "DDI IO TC3", - .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, - }, - }, - { - .name = "DDI IO TC4", - .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, - }, - }, - { - .name = "DDI IO TC5", - .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, - }, - }, - { - .name = "DDI IO TC6", - .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, - }, - }, - { - .name = "TC cold off", - .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, - .ops = &tgl_tc_cold_off_ops, - .id = TGL_DISP_PW_TC_COLD_OFF, - }, - { - .name = "AUX A", - .domains = TGL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = TGL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = TGL_AUX_C_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - }, - }, - { - .name = "AUX USBC1", - .domains = 
TGL_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC3", - .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC4", - .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC5", - .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC6", - .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX TBT1", - .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT2", - .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT3", - .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT4", - .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT5", - .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT6", - .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "power well 4", - .domains = TGL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - } - }, - { - .name = "power well 5", - .domains = TGL_PW_5_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_PW_5, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_D), - }, - }, -}; - -static const struct i915_power_well_desc rkl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 3", - .domains = 
RKL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 4", - .domains = RKL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - } - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI IO TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, - { - .name = "DDI IO TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - }, - }, - { - .name = "AUX USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - }, - }, -}; - -static const struct i915_power_well_desc dg1_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = DG1_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = DG1_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI IO TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, - { - .name = "DDI IO TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { 
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, - { - .name = "AUX A", - .domains = TGL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = TGL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "power well 4", - .domains = TGL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - } - }, - { - .name = "power well 5", - .domains = TGL_PW_5_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_PW_5, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_D), - }, - }, -}; - -static const struct i915_power_well_desc xelpd_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = XELPD_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well A", - .domains = XELPD_PW_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_A, - .hsw.irq_pipe_mask = BIT(PIPE_A), - .hsw.has_fuses = true, - }, - }, - { - .name = "power well B", - .domains = XELPD_PW_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_B, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_fuses = true, - }, - }, - { - .name = "power well C", - .domains = XELPD_PW_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_C, - .hsw.irq_pipe_mask = BIT(PIPE_C), - .hsw.has_fuses = true, - }, - }, - { - .name = "power well D", - .domains = XELPD_PW_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_D, - .hsw.irq_pipe_mask = BIT(PIPE_D), - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - } - }, - { - .name = 
"DDI IO D_XELPD", - .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, - } - }, - { - .name = "DDI IO E_XELPD", - .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, - } - }, - { - .name = "DDI IO TC1", - .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - } - }, - { - .name = "DDI IO TC2", - .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - } - }, - { - .name = "DDI IO TC3", - .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, - } - }, - { - .name = "DDI IO TC4", - .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, - } - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX C", - .domains = TGL_AUX_C_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX D_XELPD", - .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX E_XELPD", - .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, - }, - }, - { - .name = "AUX USBC1", - .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX USBC2", - .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - }, - }, - { - .name = "AUX USBC3", - .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, - }, - }, - { - .name = "AUX USBC4", - .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, - }, - }, - { - .name = "AUX TBT1", - .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT2", - .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT3", - .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT4", - .domains = 
XELPD_AUX_IO_TBT4_POWER_DOMAINS,
-	.ops = &icl_aux_power_well_ops,
-	.id = DISP_PW_ID_NONE,
-	{
-		.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
-		.hsw.is_tc_tbt = true,
-	},
-	},
-};
-
 static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
 					      int disable_power_well)
@@ -3089,57 +927,6 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 	return mask;
 }
 
-static int
-__set_power_wells(struct i915_power_domains *power_domains,
-		  const struct i915_power_well_desc *power_well_descs,
-		  int power_well_descs_sz, u64 skip_mask)
-{
-	struct drm_i915_private *i915 = container_of(power_domains,
-						     struct drm_i915_private,
-						     power_domains);
-	u64 power_well_ids = 0;
-	int power_well_count = 0;
-	int i, plt_idx = 0;
-
-	for (i = 0; i < power_well_descs_sz; i++)
-		if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
-			power_well_count++;
-
-	power_domains->power_well_count = power_well_count;
-	power_domains->power_wells =
-		kcalloc(power_well_count,
-			sizeof(*power_domains->power_wells),
-			GFP_KERNEL);
-	if (!power_domains->power_wells)
-		return -ENOMEM;
-
-	for (i = 0; i < power_well_descs_sz; i++) {
-		enum i915_power_well_id id = power_well_descs[i].id;
-
-		if (BIT_ULL(id) & skip_mask)
-			continue;
-
-		power_domains->power_wells[plt_idx++].desc =
-			&power_well_descs[i];
-
-		if (id == DISP_PW_ID_NONE)
-			continue;
-
-		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
-		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
-		power_well_ids |= BIT_ULL(id);
-	}
-
-	return 0;
-}
-
-#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
-	__set_power_wells(power_domains, __power_well_descs, \
-			  ARRAY_SIZE(__power_well_descs), skip_mask)
-
-#define set_power_wells(power_domains, __power_well_descs) \
-	set_power_wells_mask(power_domains, __power_well_descs, 0)
-
 /**
  * intel_power_domains_init - initializes the power domain structures
  * @dev_priv: i915 device instance
@@ -3150,7 +937,6 @@ __set_power_wells(struct i915_power_domains *power_domains,
 int intel_power_domains_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	int err;
 
 	dev_priv->params.disable_power_well =
 		sanitize_disable_power_well_option(dev_priv,
@@ -3168,47 +954,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	INIT_DELAYED_WORK(&power_domains->async_put_work,
 			  intel_display_power_put_async_work);
 
-	/*
-	 * The enabling order will be from lower to higher indexed wells,
-	 * the disabling order is reversed.
-	 */
-	if (!HAS_DISPLAY(dev_priv)) {
-		power_domains->power_well_count = 0;
-		err = 0;
-	} else if (DISPLAY_VER(dev_priv) >= 13) {
-		err = set_power_wells(power_domains, xelpd_power_wells);
-	} else if (IS_DG1(dev_priv)) {
-		err = set_power_wells(power_domains, dg1_power_wells);
-	} else if (IS_ALDERLAKE_S(dev_priv)) {
-		err = set_power_wells_mask(power_domains, tgl_power_wells,
-					   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
-	} else if (IS_ROCKETLAKE(dev_priv)) {
-		err = set_power_wells(power_domains, rkl_power_wells);
-	} else if (DISPLAY_VER(dev_priv) == 12) {
-		err = set_power_wells(power_domains, tgl_power_wells);
-	} else if (DISPLAY_VER(dev_priv) == 11) {
-		err = set_power_wells(power_domains, icl_power_wells);
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		err = set_power_wells(power_domains, glk_power_wells);
-	} else if (IS_BROXTON(dev_priv)) {
-		err = set_power_wells(power_domains, bxt_power_wells);
-	} else if (DISPLAY_VER(dev_priv) == 9) {
-		err = set_power_wells(power_domains, skl_power_wells);
-	} else if (IS_CHERRYVIEW(dev_priv)) {
-		err = set_power_wells(power_domains, chv_power_wells);
-	} else if (IS_BROADWELL(dev_priv)) {
-		err = set_power_wells(power_domains, bdw_power_wells);
-	} else if (IS_HASWELL(dev_priv)) {
-		err = set_power_wells(power_domains, hsw_power_wells);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		err = set_power_wells(power_domains, vlv_power_wells);
-	} else if (IS_I830(dev_priv)) {
-		err = set_power_wells(power_domains, i830_power_wells);
-	} else {
-		err = set_power_wells(power_domains, i9xx_always_on_power_well);
-	}
-
-	return err;
+	return intel_display_power_map_init(power_domains);
 }
 
 /**
@@ -3219,7 +965,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
 {
-	kfree(dev_priv->power_domains.power_wells);
+	intel_display_power_map_cleanup(&dev_priv->power_domains);
 }
 
 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
new file mode 100644
index 000000000000..97e0daec9544
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -0,0 +1,2150 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include "vlv_sideband_reg.h"
+
+#include "intel_display_power_map.h"
+#include "intel_display_power_well.h"
+
+#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
+
+static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
+	{
+		.name = "always-on",
+		.domains = POWER_DOMAIN_MASK,
+		.ops = &i9xx_always_on_power_well_ops,
+		.always_on = true,
+		.id = DISP_PW_ID_NONE,
+	},
+};
+
+#define I830_PIPES_POWER_DOMAINS ( \
+	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+	BIT_ULL(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_desc i830_power_wells[] = {
+	{
+		.name = "always-on",
+		.domains = POWER_DOMAIN_MASK,
+		.ops = &i9xx_always_on_power_well_ops,
+		.always_on = true,
+		.id = DISP_PW_ID_NONE,
+	}, {
+		.name = "pipes",
+		.domains = I830_PIPES_POWER_DOMAINS,
+		.ops = &i830_pipes_power_well_ops,
+		.id = DISP_PW_ID_NONE,
+	},
+};
+
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+
BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc hsw_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "display", + .domains = HSW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = HSW_DISP_PW_GLOBAL, + { + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .hsw.has_vga = true, + }, + }, +}; + +#define BDW_DISPLAY_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc bdw_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "display", + .domains = BDW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = HSW_DISP_PW_GLOBAL, + { + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + }, + }, +}; + +#define VLV_DISPLAY_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ + 
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc vlv_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "display", + .domains = VLV_DISPLAY_POWER_DOMAINS, + .ops = &vlv_display_power_well_ops, + .id = VLV_DISP_PW_DISP2D, + { + .vlv.idx = PUNIT_PWGT_IDX_DISP2D, + }, + }, { + .name = "dpio-tx-b-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, + }, + }, { + .name = "dpio-tx-b-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, + }, + }, { + .name = "dpio-tx-c-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, + }, + }, { + .name = "dpio-tx-c-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, + }, + }, { + .name = "dpio-common", + .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &vlv_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, + }, +}; + +#define CHV_DISPLAY_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc chv_power_wells[] = { + { + .name = "always-on", + 
.domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "display", + /* + * Pipe A power well is the new disp2d well. Pipe B and C + * power wells don't actually exist. Pipe A power well is + * required for any pipe to work. + */ + .domains = CHV_DISPLAY_POWER_DOMAINS, + .ops = &chv_pipe_power_well_ops, + .id = DISP_PW_ID_NONE, + }, { + .name = "dpio-common-bc", + .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &chv_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, + }, { + .name = "dpio-common-d", + .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, + .ops = &chv_dpio_cmn_power_well_ops, + .id = CHV_DISP_PW_DPIO_CMN_D, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, + }, + }, +}; + +#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc skl_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "MISC IO power well", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_MISC_IO, + { + .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, + }, + }, { + .name = "DC off", + .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 2", + .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "DDI A/E IO power well", + .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = 
DISP_PW_ID_NONE, + { + .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, + }, + }, { + .name = "DDI B IO power well", + .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, + }, { + .name = "DDI C IO power well", + .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, + }, { + .name = "DDI D IO power well", + .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = SKL_PW_CTL_IDX_DDI_D, + }, + }, +}; + +#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc bxt_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "DC off", + .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 2", + .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "dpio-common-a", + .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = BXT_DISP_PW_DPIO_CMN_A, + { + .bxt.phy = DPIO_PHY1, + }, + }, { + .name = "dpio-common-bc", + .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .bxt.phy = DPIO_PHY0, + }, + }, +}; + +#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + 
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) +#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) +#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) + +#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc glk_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "DC off", + .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 2", + .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "dpio-common-a", + .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = BXT_DISP_PW_DPIO_CMN_A, + { + .bxt.phy = DPIO_PHY1, + }, + }, { + .name = "dpio-common-b", + .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .bxt.phy = DPIO_PHY0, + }, + }, { + .name = "dpio-common-c", + .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = GLK_DISP_PW_DPIO_CMN_C, + { + .bxt.phy = DPIO_PHY2, + }, + }, { + .name = "AUX A", + .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = GLK_PW_CTL_IDX_AUX_A, + }, + }, { + .name = "AUX B", + .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = GLK_PW_CTL_IDX_AUX_B, + }, + }, { + .name = "AUX C", + .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = GLK_PW_CTL_IDX_AUX_C, + }, + }, { + .name = "DDI A IO power well", + .domains = 
GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = GLK_PW_CTL_IDX_DDI_A, + }, + }, { + .name = "DDI B IO power well", + .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, + }, { + .name = "DDI C IO power well", + .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, + }, +}; + +/* + * ICL PW_0/PG_0 domains (HW/DMC control): + * - PCI + * - clocks except port PLL + * - central power except FBC + * - shared functions except pipe interrupts, pipe MBUS, DBUF registers + * ICL PW_1/PG_1 domains (HW/DMC control): + * - DBUF function + * - PIPE_A and its planes, except VGA + * - transcoder EDP + PSR + * - transcoder DSI + * - DDI_A + * - FBC + */ +#define ICL_PW_4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* VDSC/joining */ + +#define ICL_PW_3_POWER_DOMAINS ( \ + ICL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - transcoder WD + * - KVMR (HW control) + */ + +#define ICL_PW_2_POWER_DOMAINS ( \ + ICL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - KVMR (HW control) + */ + +#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + ICL_PW_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_DC_OFF) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define ICL_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) +#define ICL_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) +#define ICL_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) +#define ICL_DDI_IO_D_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) +#define ICL_DDI_IO_E_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) +#define ICL_DDI_IO_F_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) + +#define ICL_AUX_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A)) + +#define ICL_AUX_B_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_B) +#define ICL_AUX_C_TC1_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_C) +#define ICL_AUX_D_TC2_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D) +#define ICL_AUX_E_TC3_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E) +#define ICL_AUX_F_TC4_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_F) +#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_C_TBT) +#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_TBT) +#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS 
BIT_ULL(POWER_DOMAIN_AUX_E_TBT) +#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_F_TBT) + +static const struct i915_power_well_desc icl_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "DC off", + .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 2", + .domains = ICL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, { + .name = "power well 3", + .domains = ICL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_3, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + }, + }, { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + }, + }, { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + }, + }, { + .name = "DDI D IO", + .domains = ICL_DDI_IO_D_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_D, + }, + }, { + .name = "DDI E IO", + .domains = ICL_DDI_IO_E_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_E, + }, + }, { + .name = "DDI F IO", + .domains = ICL_DDI_IO_F_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_F, + }, + }, { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, { + .name = "AUX C TC1", + .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX D TC2", + .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_D, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX E TC3", + .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_E, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX F TC4", + .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_F, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX C TBT1", + .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX D TBT2", 
+ .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX E TBT3", + .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX F TBT4", + .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "power well 4", + .domains = ICL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, + }, +}; + +#define TGL_PW_5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_D) | \ + BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_4_POWER_DOMAINS ( \ + TGL_PW_5_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_3_POWER_DOMAINS ( \ + TGL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_2_POWER_DOMAINS ( \ + TGL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + TGL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) +#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) +#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) +#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) +#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) +#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) + +#define TGL_AUX_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A)) +#define TGL_AUX_B_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_B) +#define TGL_AUX_C_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_C) + +#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) +#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) +#define 
TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) +#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) +#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) +#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) + +#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) +#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) +#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) +#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) +#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) +#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) + +#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ + BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) + +static const struct i915_power_well_desc tgl_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "DC off", + .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 2", + .domains = TGL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, { + .name = "power well 3", + .domains = TGL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_3, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + } + }, { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + } + }, { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + } + }, { + .name = "DDI IO TC1", + .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, + }, + }, { + .name = "DDI IO TC2", + .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, + }, + }, { + .name = "DDI IO TC3", + .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, + }, + }, { + .name = "DDI IO TC4", + .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, + }, + }, { + .name = "DDI IO TC5", + .domains = 
TGL_DDI_IO_TC5_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, + }, + }, { + .name = "DDI IO TC6", + .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, + }, + }, { + .name = "TC cold off", + .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, + .ops = &tgl_tc_cold_off_ops, + .id = TGL_DISP_PW_TC_COLD_OFF, + }, { + .name = "AUX A", + .domains = TGL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, { + .name = "AUX B", + .domains = TGL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, { + .name = "AUX C", + .domains = TGL_AUX_C_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + }, + }, { + .name = "AUX USBC1", + .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX USBC2", + .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX USBC3", + .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX USBC4", + .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX USBC5", + .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX USBC6", + .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX TBT1", + .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT2", + .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT3", + .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT4", + .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT5", + .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT6", + .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "power well 4", + .domains = TGL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + 
.hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + } + }, { + .name = "power well 5", + .domains = TGL_PW_5_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_PW_5, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_D), + }, + }, +}; + +#define RKL_PW_4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define RKL_PW_3_POWER_DOMAINS ( \ + RKL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +/* + * There is no PW_2/PG_2 on RKL. + * + * RKL PW_1/PG_1 domains (under HW/DMC control): + * - DBUF function (note: registers are in PW0) + * - PIPE_A and its planes and VDSC/joining, except VGA + * - transcoder A + * - DDI_A and DDI_B + * - FBC + * + * RKL PW_0/PG_0 domains (under HW/DMC control): + * - PCI + * - clocks except port PLL + * - shared functions: + * * interrupts except pipe interrupts + * * MBus except PIPE_MBUS_DBOX_CTL + * * DBUF registers + * - central power except FBC + * - top-level GTC (DDI-level GTC is in the well associated with the DDI) + */ + +#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + RKL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc rkl_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "DC off", + .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 3", + .domains = RKL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_3, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "power well 4", + .domains = RKL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + } + }, { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + } + }, { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + } + }, { + .name = "DDI IO TC1", + .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, + }, + }, { + .name = "DDI IO TC2", + .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = 
TGL_PW_CTL_IDX_DDI_TC2, + }, + }, { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, { + .name = "AUX USBC1", + .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, + }, + }, { + .name = "AUX USBC2", + .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, + }, + }, +}; + +/* + * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. + */ +#define DG1_PW_3_POWER_DOMAINS ( \ + TGL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + DG1_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define DG1_PW_2_POWER_DOMAINS ( \ + DG1_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_desc dg1_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "DC off", + .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 2", + .domains = DG1_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, { + .name = "power well 3", + .domains = DG1_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_3, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + } + }, { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + } + }, { + .name = "DDI IO TC1", + .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, + }, + }, { + .name = "DDI IO TC2", + .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, + }, + }, { + .name = "AUX A", + .domains = TGL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, 
+ }, { + .name = "AUX B", + .domains = TGL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, { + .name = "AUX USBC1", + .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "AUX USBC2", + .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, + .hsw.is_tc_tbt = false, + }, + }, { + .name = "power well 4", + .domains = TGL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + } + }, { + .name = "power well 5", + .domains = TGL_PW_5_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_PW_5, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_D), + }, + }, +}; + +/* + * XE_LPD Power Domains + * + * Previous platforms required that PG(n-1) be enabled before PG(n). That + * dependency chain turns into a dependency tree on XE_LPD: + * + * PG0 + * | + * --PG1-- + * / \ + * PGA --PG2-- + * / | \ + * PGB PGC PGD + * + * Power wells must be enabled from top to bottom and disabled from bottom + * to top. This allows pipes to be power gated independently. + */ + +#define XELPD_PW_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_D) | \ + BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define XELPD_PW_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define XELPD_PW_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define XELPD_PW_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define XELPD_PW_2_POWER_DOMAINS ( \ + XELPD_PW_B_POWER_DOMAINS | \ + XELPD_PW_C_POWER_DOMAINS | \ + XELPD_PW_D_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ + BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +/* + * XELPD PW_1/PG_1 domains (under HW/DMC control): + * - DBUF function (registers are in PW0) + * - Transcoder A + * - DDI_A and DDI_B + * + * XELPD PW_0/PW_1 domains (under HW/DMC control): + * - PCI + * - Clocks except port PLL + * - Shared functions: + * * interrupts except pipe interrupts + * * MBus except PIPE_MBUS_DBOX_CTL + * * DBUF registers + * - Central power except 
FBC + * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) + */ + +#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + XELPD_PW_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ + BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) +#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) +#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) +#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) +#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) +#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) + +#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) +#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) +#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) +#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) + +#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) +#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) +#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) +#define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) +#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) +#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) + +static const struct i915_power_well_desc xelpd_power_wells[] = { + { + .name = "always-on", + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + .id = DISP_PW_ID_NONE, + }, { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .always_on = true, + .id = SKL_DISP_PW_1, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, { + .name = "DC off", + .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, { + .name = "power well 2", + .domains = XELPD_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, { + .name = "power well A", + .domains = XELPD_PW_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_PW_A, + .hsw.irq_pipe_mask = BIT(PIPE_A), + .hsw.has_fuses = true, + }, + }, { + .name = "power well B", + .domains = XELPD_PW_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_PW_B, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_fuses = true, + }, + }, { + .name = "power well C", + .domains = XELPD_PW_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_PW_C, + .hsw.irq_pipe_mask = BIT(PIPE_C), + .hsw.has_fuses = true, + }, + }, { + .name = "power well D", + .domains = XELPD_PW_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_PW_D, + .hsw.irq_pipe_mask = BIT(PIPE_D), + .hsw.has_fuses = true, + }, + }, { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + } + }, { + .name = "DDI B IO", + .domains = 
ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + } + }, { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + } + }, { + .name = "DDI IO D_XELPD", + .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, + } + }, { + .name = "DDI IO E_XELPD", + .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, + } + }, { + .name = "DDI IO TC1", + .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, + } + }, { + .name = "DDI IO TC2", + .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, + } + }, { + .name = "DDI IO TC3", + .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, + } + }, { + .name = "DDI IO TC4", + .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, + .ops = &icl_ddi_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, + } + }, { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + .hsw.fixed_enable_delay = 600, + }, + }, { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + .hsw.fixed_enable_delay = 600, + }, + }, { + .name = "AUX C", + .domains = TGL_AUX_C_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + .hsw.fixed_enable_delay = 600, + }, + }, { + .name = "AUX D_XELPD", + .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, + .hsw.fixed_enable_delay = 600, + }, + }, { + .name = "AUX E_XELPD", + .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, + }, + }, { + .name = "AUX USBC1", + .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, + .hsw.fixed_enable_delay = 600, + }, + }, { + .name = "AUX USBC2", + .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, + }, + }, { + .name = "AUX USBC3", + .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, + }, + }, { + .name = "AUX USBC4", + .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, + }, + }, { + .name = "AUX TBT1", + .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT2", + .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 
+ .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT3", + .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, + .hsw.is_tc_tbt = true, + }, + }, { + .name = "AUX TBT4", + .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, + .hsw.is_tc_tbt = true, + }, + }, +}; + +static int +__set_power_wells(struct i915_power_domains *power_domains, + const struct i915_power_well_desc *power_well_descs, + int power_well_descs_sz, u64 skip_mask) +{ + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); + u64 power_well_ids = 0; + int power_well_count = 0; + int i, plt_idx = 0; + + for (i = 0; i < power_well_descs_sz; i++) + if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) + power_well_count++; + + power_domains->power_well_count = power_well_count; + power_domains->power_wells = + kcalloc(power_well_count, + sizeof(*power_domains->power_wells), + GFP_KERNEL); + if (!power_domains->power_wells) + return -ENOMEM; + + for (i = 0; i < power_well_descs_sz; i++) { + enum i915_power_well_id id = power_well_descs[i].id; + + if (BIT_ULL(id) & skip_mask) + continue; + + power_domains->power_wells[plt_idx++].desc = + &power_well_descs[i]; + + if (id == DISP_PW_ID_NONE) + continue; + + drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); + drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); + power_well_ids |= BIT_ULL(id); + } + + return 0; +} + +#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ + __set_power_wells(power_domains, __power_well_descs, \ + ARRAY_SIZE(__power_well_descs), skip_mask) + +#define set_power_wells(power_domains, __power_well_descs) \ + set_power_wells_mask(power_domains, __power_well_descs, 0) + +/** + * intel_display_power_map_init - initialize power domain -> power well mappings + * @power_domains: power domain state + * + * Creates all the power wells for the current platform, initializes the + * dynamic state for them and initializes the mapping of each power well to + * all the power domains the power well belongs to. + */ +int intel_display_power_map_init(struct i915_power_domains *power_domains) +{ + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); + /* + * The enabling order will be from lower to higher indexed wells, + * the disabling order is reversed. 
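+ * On XE_LPD, for example, this keeps a parent well (PG1/PG2) ahead of the
+ * pipe wells (PGA-PGD) that hang off it in the dependency tree above.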
+ */ + if (!HAS_DISPLAY(i915)) { + power_domains->power_well_count = 0; + return 0; + } + + if (DISPLAY_VER(i915) >= 13) + return set_power_wells(power_domains, xelpd_power_wells); + else if (IS_DG1(i915)) + return set_power_wells(power_domains, dg1_power_wells); + else if (IS_ALDERLAKE_S(i915)) + return set_power_wells_mask(power_domains, tgl_power_wells, + BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); + else if (IS_ROCKETLAKE(i915)) + return set_power_wells(power_domains, rkl_power_wells); + else if (DISPLAY_VER(i915) == 12) + return set_power_wells(power_domains, tgl_power_wells); + else if (DISPLAY_VER(i915) == 11) + return set_power_wells(power_domains, icl_power_wells); + else if (IS_GEMINILAKE(i915)) + return set_power_wells(power_domains, glk_power_wells); + else if (IS_BROXTON(i915)) + return set_power_wells(power_domains, bxt_power_wells); + else if (DISPLAY_VER(i915) == 9) + return set_power_wells(power_domains, skl_power_wells); + else if (IS_CHERRYVIEW(i915)) + return set_power_wells(power_domains, chv_power_wells); + else if (IS_BROADWELL(i915)) + return set_power_wells(power_domains, bdw_power_wells); + else if (IS_HASWELL(i915)) + return set_power_wells(power_domains, hsw_power_wells); + else if (IS_VALLEYVIEW(i915)) + return set_power_wells(power_domains, vlv_power_wells); + else if (IS_I830(i915)) + return set_power_wells(power_domains, i830_power_wells); + else + return set_power_wells(power_domains, i9xx_always_on_power_well); +} + +/** + * intel_display_power_map_cleanup - clean up power domain -> power well mappings + * @power_domains: power domain state + * + * Cleans up all the state that was initialized by intel_display_power_map_init(). + */ +void intel_display_power_map_cleanup(struct i915_power_domains *power_domains) +{ + kfree(power_domains->power_wells); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.h b/drivers/gpu/drm/i915/display/intel_display_power_map.h new file mode 100644 index 000000000000..da8f7055a44c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_POWER_MAP_H__ +#define __INTEL_DISPLAY_POWER_MAP_H__ + +struct i915_power_domains; + +int intel_display_power_map_init(struct i915_power_domains *power_domains); +void intel_display_power_map_cleanup(struct i915_power_domains *power_domains); + +#endif From 92f6d062ddc6cb7a6c44e0f45d7ca9ceb82a0bbd Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:43 +0300 Subject: [PATCH 118/219] drm/i915: Move the dg2 fixed_enable_delay power well param to a common bitfield MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DG2 fixed delay duration is always 600 usec, so save some space in the power well descriptors by converting the parameter to a flag. While at it, also use a bitfield for both the always_on and fixed_enable_delay flags. This change also allows simplifying the definition of power wells sharing the same flags in an upcoming patch. 
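
For illustration, a minimal sketch of the resulting layout and enable path (field subset only; the struct and function names here are placeholders rather than the driver's real ones):

    /* Before: each descriptor carried a delay value, in practice always 600 usec (DG2). */
    struct pw_desc_old {
            const char *name;
            bool always_on;
            u64 domains;
            u16 fixed_enable_delay;         /* usec; previously in the hsw-specific part of the descriptor */
    };

    /* After: one bit per flag, packed together with always_on. */
    struct pw_desc_new {
            const char *name;
            u64 domains;
            u8 always_on:1;
            u8 fixed_enable_delay:1;        /* sleep a fixed time instead of polling the ack */
    };

    /* The enable path then hard-codes the only delay value ever used: */
    static void wait_for_enable_sketch(struct drm_i915_private *i915,
                                       const struct pw_desc_new *desc)
    {
            if (IS_DG2(i915) && desc->fixed_enable_delay) {
                    usleep_range(600, 1200);        /* was enable_delay..2 * enable_delay */
                    return;
            }
            /* ...otherwise keep polling the power well status bit as before... */
    }

The hunks below make the equivalent change directly in struct i915_power_well_desc and hsw_wait_for_power_well_enable().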
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-4-imre.deak@intel.com --- .../gpu/drm/i915/display/intel_display_power_map.c | 10 +++++----- .../drm/i915/display/intel_display_power_well.c | 5 ++--- .../drm/i915/display/intel_display_power_well.h | 14 +++++++------- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 97e0daec9544..e1824936a998 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1920,37 +1920,37 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .name = "AUX A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - .hsw.fixed_enable_delay = 600, }, }, { .name = "AUX B", .domains = ICL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - .hsw.fixed_enable_delay = 600, }, }, { .name = "AUX C", .domains = TGL_AUX_C_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - .hsw.fixed_enable_delay = 600, }, }, { .name = "AUX D_XELPD", .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, { .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, - .hsw.fixed_enable_delay = 600, }, }, { .name = "AUX E_XELPD", @@ -1964,10 +1964,10 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .name = "AUX USBC1", .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.fixed_enable_delay = 600, }, }, { .name = "AUX USBC2", diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index b49f0de30296..0c90577de5d7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -243,15 +243,14 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = power_well->desc->hsw.idx; - int enable_delay = power_well->desc->hsw.fixed_enable_delay; /* * For some power wells we're not supposed to watch the status bit for * an ack, but rather just wait a fixed amount of time and then * proceed. This is only used on DG2. */ - if (IS_DG2(dev_priv) && enable_delay) { - usleep_range(enable_delay, 2 * enable_delay); + if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) { + usleep_range(600, 1200); return; } diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index c4a8a3d728e0..cb4681d0ffc6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -50,8 +50,14 @@ enum i915_power_well_id { struct i915_power_well_desc { const char *name; - bool always_on; u64 domains; + u8 always_on:1; + /* + * Instead of waiting for the status bit to ack enables, + * just wait a specific amount of time and then consider + * the well enabled. 
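+ * On DG2, currently the only user of this flag, the enable path simply
+ * sleeps for 600-1200 us instead of polling for the ack (see
+ * hsw_wait_for_power_well_enable()).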
+ */ + u8 fixed_enable_delay:1; /* unique identifier for this power well */ enum i915_power_well_id id; /* @@ -77,12 +83,6 @@ struct i915_power_well_desc { u8 idx; /* Mask of pipes whose IRQ logic is backed by the pw */ u8 irq_pipe_mask; - /* - * Instead of waiting for the status bit to ack enables, - * just wait a specific amount of time and then consider - * the well enabled. - */ - u16 fixed_enable_delay; /* The pw is backing the VGA functionality */ bool has_vga:1; bool has_fuses:1; From 6a006ee93ff829dddce8e82b4becef20f74db885 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:44 +0300 Subject: [PATCH 119/219] drm/i915: Move the HSW power well flags to a common bitfield MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Save some space by grouping the HSW power well descriptor flags along with other flags in one bitfield. This change also lets simplifying the definition of power well descriptors sharing the same flags in an upcoming patch. Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-5-imre.deak@intel.com --- .../i915/display/intel_display_power_map.c | 166 +++++++++--------- .../i915/display/intel_display_power_well.c | 16 +- .../i915/display/intel_display_power_well.h | 25 +-- 3 files changed, 104 insertions(+), 103 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index e1824936a998..d566e638ac9b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -76,10 +76,10 @@ static const struct i915_power_well_desc hsw_power_wells[] = { .name = "display", .domains = HSW_DISPLAY_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_vga = true, .id = HSW_DISP_PW_GLOBAL, { .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - .hsw.has_vga = true, }, }, }; @@ -112,11 +112,11 @@ static const struct i915_power_well_desc bdw_power_wells[] = { .name = "display", .domains = BDW_DISPLAY_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .id = HSW_DISP_PW_GLOBAL, { .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, }, }, }; @@ -368,10 +368,10 @@ static const struct i915_power_well_desc skl_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "MISC IO power well", @@ -392,12 +392,12 @@ static const struct i915_power_well_desc skl_power_wells[] = { .name = "power well 2", .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .has_fuses = true, .id = SKL_DISP_PW_2, { .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "DDI A/E IO power well", @@ -484,10 +484,10 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "DC off", @@ -498,12 +498,12 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .name = "power well 2", .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = 
&hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .has_fuses = true, .id = SKL_DISP_PW_2, { .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "dpio-common-a", @@ -594,10 +594,10 @@ static const struct i915_power_well_desc glk_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "DC off", @@ -608,12 +608,12 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "power well 2", .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .has_fuses = true, .id = SKL_DISP_PW_2, { .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "dpio-common-a", @@ -789,10 +789,10 @@ static const struct i915_power_well_desc icl_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "DC off", @@ -803,21 +803,21 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "power well 2", .domains = ICL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_fuses = true, .id = SKL_DISP_PW_2, { .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, }, }, { .name = "power well 3", .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, .id = ICL_DISP_PW_3, { .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "DDI A IO", @@ -887,83 +887,83 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX C TC1", .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX D TC2", .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_D, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX E TC3", .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_E, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX F TC4", .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_F, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX C TBT1", .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX D TBT2", .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX E TBT3", .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX F TBT4", .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, .ops = 
&icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, }, }, { .name = "power well 4", .domains = ICL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), }, }, }; @@ -1077,10 +1077,10 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "DC off", @@ -1091,21 +1091,21 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .name = "power well 2", .domains = TGL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_fuses = true, .id = SKL_DISP_PW_2, { .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, }, }, { .name = "power well 3", .domains = TGL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, .id = ICL_DISP_PW_3, { .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "DDI A IO", @@ -1212,129 +1212,129 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .name = "AUX USBC1", .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX USBC2", .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX USBC3", .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX USBC4", .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX USBC5", .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX USBC6", .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX TBT1", .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT2", .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT3", .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT4", .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT5", .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, .ops 
= &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT6", .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, - .hsw.is_tc_tbt = true, }, }, { .name = "power well 4", .domains = TGL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_C), .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), } }, { .name = "power well 5", .domains = TGL_PW_5_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_D), .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_PW_5, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_D), }, }, }; @@ -1400,10 +1400,10 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "DC off", @@ -1414,22 +1414,22 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .name = "power well 3", .domains = RKL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_vga = true, + .has_fuses = true, .id = ICL_DISP_PW_3, { .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "power well 4", .domains = RKL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_C), .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), } }, { .name = "DDI A IO", @@ -1540,10 +1540,10 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "DC off", @@ -1554,21 +1554,21 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .name = "power well 2", .domains = DG1_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_fuses = true, .id = SKL_DISP_PW_2, { .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, }, }, { .name = "power well 3", .domains = DG1_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_vga = true, + .has_fuses = true, .id = ICL_DISP_PW_3, { .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "DDI A IO", @@ -1622,39 +1622,39 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .name = "AUX USBC1", .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.is_tc_tbt = false, }, }, { .name = "AUX USBC2", .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = false, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - .hsw.is_tc_tbt = false, }, }, { .name = "power well 4", .domains = TGL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_C), .id = DISP_PW_ID_NONE, { .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), } }, { .name = "power well 5", .domains = TGL_PW_5_POWER_DOMAINS, 
.ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_D), .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_PW_5, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_D), }, }, }; @@ -1784,10 +1784,10 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .always_on = true, + .has_fuses = true, .id = SKL_DISP_PW_1, { .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, }, }, { .name = "DC off", @@ -1798,51 +1798,51 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .name = "power well 2", .domains = XELPD_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .has_vga = true, + .has_fuses = true, .id = SKL_DISP_PW_2, { .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_vga = true, - .hsw.has_fuses = true, }, }, { .name = "power well A", .domains = XELPD_PW_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_A), + .has_fuses = true, .id = DISP_PW_ID_NONE, { .hsw.idx = XELPD_PW_CTL_IDX_PW_A, - .hsw.irq_pipe_mask = BIT(PIPE_A), - .hsw.has_fuses = true, }, }, { .name = "power well B", .domains = XELPD_PW_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, .id = DISP_PW_ID_NONE, { .hsw.idx = XELPD_PW_CTL_IDX_PW_B, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_fuses = true, }, }, { .name = "power well C", .domains = XELPD_PW_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, .id = DISP_PW_ID_NONE, { .hsw.idx = XELPD_PW_CTL_IDX_PW_C, - .hsw.irq_pipe_mask = BIT(PIPE_C), - .hsw.has_fuses = true, }, }, { .name = "power well D", .domains = XELPD_PW_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_D), + .has_fuses = true, .id = DISP_PW_ID_NONE, { .hsw.idx = XELPD_PW_CTL_IDX_PW_D, - .hsw.irq_pipe_mask = BIT(PIPE_D), - .hsw.has_fuses = true, }, }, { .name = "DDI A IO", @@ -1997,37 +1997,37 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .name = "AUX TBT1", .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT2", .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT3", .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT4", .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, .id = DISP_PW_ID_NONE, { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, }, }, }; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 0c90577de5d7..b95bc32d6da0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -197,8 +197,8 @@ static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) { int pw_idx = power_well->desc->hsw.idx; - return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : - ICL_AUX_PW_TO_CH(pw_idx); + return power_well->desc->is_tc_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); } static struct intel_digital_port * @@ -326,7 +326,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, int pw_idx = power_well->desc->hsw.idx; u32 val; - if (power_well->desc->hsw.has_fuses) { + if (power_well->desc->has_fuses) { enum skl_power_gate pg; pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : @@ -353,7 +353,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_enable(dev_priv, power_well, false); - if (power_well->desc->hsw.has_fuses) { + if (power_well->desc->has_fuses) { enum skl_power_gate pg; pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : @@ -362,8 +362,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, } hsw_power_well_post_enable(dev_priv, - power_well->desc->hsw.irq_pipe_mask, - power_well->desc->hsw.has_vga); + power_well->desc->irq_pipe_mask, + power_well->desc->has_vga); } static void hsw_power_well_disable(struct drm_i915_private *dev_priv, @@ -374,7 +374,7 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, u32 val; hsw_power_well_pre_disable(dev_priv, - power_well->desc->hsw.irq_pipe_mask); + power_well->desc->irq_pipe_mask); val = intel_de_read(dev_priv, regs->driver); intel_de_write(dev_priv, regs->driver, @@ -491,7 +491,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - bool is_tbt = power_well->desc->hsw.is_tc_tbt; + bool is_tbt = power_well->desc->is_tc_tbt; bool timeout_expected; u32 val; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index cb4681d0ffc6..26fe9e1048bc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -51,13 +51,24 @@ enum i915_power_well_id { struct i915_power_well_desc { const char *name; u64 domains; - u8 always_on:1; + /* Mask of pipes whose IRQ logic is backed by the pw */ + u16 irq_pipe_mask:4; + u16 always_on:1; /* * Instead of waiting for the status bit to ack enables, * just wait a specific amount of time and then consider * the well enabled. */ - u8 fixed_enable_delay:1; + u16 fixed_enable_delay:1; + /* The pw is backing the VGA functionality */ + u16 has_vga:1; + u16 has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. + */ + u16 is_tc_tbt:1; + /* unique identifier for this power well */ enum i915_power_well_id id; /* @@ -81,16 +92,6 @@ struct i915_power_well_desc { * constrol/status registers. */ u8 idx; - /* Mask of pipes whose IRQ logic is backed by the pw */ - u8 irq_pipe_mask; - /* The pw is backing the VGA functionality */ - bool has_vga:1; - bool has_fuses:1; - /* - * The pw is for an ICL+ TypeC PHY port in - * Thunderbolt mode. - */ - bool is_tc_tbt:1; } hsw; }; const struct i915_power_well_ops *ops; From 0ba2661db6262da49cf4edee7fc4ab6d17bcc45e Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:45 +0300 Subject: [PATCH 120/219] drm/i915: Rename the power domain names to end with pipes/ports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make all power domain names end with the pipe/port instance for consistency. No functional changes. 
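
As a simplified, self-contained illustration of why the instance suffix goes last (the enum, name table and main() below are stand-ins written for this sketch, not the driver's actual definitions): keeping the per-instance enumerators consecutive is what lets instance-indexed macros such as POWER_DOMAIN_PIPE_PANEL_FITTER(), updated later in this patch, stay a plain addition.

#include <stdio.h>

/*
 * Stand-in for a slice of the power domain enum: the pipe instance is
 * the trailing part of the name, so the enumerators for one domain
 * type stay consecutive.
 */
enum power_domain {
	POWER_DOMAIN_PIPE_PANEL_FITTER_A,
	POWER_DOMAIN_PIPE_PANEL_FITTER_B,
	POWER_DOMAIN_PIPE_PANEL_FITTER_C,
	POWER_DOMAIN_PIPE_PANEL_FITTER_D,
};

/* Same shape as the driver's POWER_DOMAIN_PIPE_PANEL_FITTER(pipe). */
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)

static const char * const domain_names[] = {
	"PIPE_PANEL_FITTER_A",
	"PIPE_PANEL_FITTER_B",
	"PIPE_PANEL_FITTER_C",
	"PIPE_PANEL_FITTER_D",
};

int main(void)
{
	int pipe;

	/* Instance-last names line up with instance-indexed lookups. */
	for (pipe = 0; pipe < 4; pipe++)
		printf("pipe %d -> %s\n", pipe,
		       domain_names[POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)]);

	return 0;
}

The strings returned by intel_display_power_domain_str() are renamed the same way in this patch, so the debug output stays consistent with the enum names while the mapping itself is unchanged.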
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-6-imre.deak@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 8 +- drivers/gpu/drm/i915/display/intel_ddi.c | 2 +- drivers/gpu/drm/i915/display/intel_display.c | 34 ++-- .../drm/i915/display/intel_display_power.c | 116 +++++------ .../drm/i915/display/intel_display_power.h | 66 +++---- .../i915/display/intel_display_power_map.c | 184 +++++++++--------- 6 files changed, 205 insertions(+), 205 deletions(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 084cc51d1c41..d29211b9807c 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -399,8 +399,8 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, intel_dsi->io_wakeref[port] = intel_display_power_get(dev_priv, port == PORT_A ? - POWER_DOMAIN_PORT_DDI_A_IO : - POWER_DOMAIN_PORT_DDI_B_IO); + POWER_DOMAIN_PORT_DDI_IO_A : + POWER_DOMAIN_PORT_DDI_IO_B); } } @@ -1425,8 +1425,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]); intel_display_power_put(dev_priv, port == PORT_A ? - POWER_DOMAIN_PORT_DDI_A_IO : - POWER_DOMAIN_PORT_DDI_B_IO, + POWER_DOMAIN_PORT_DDI_IO_A : + POWER_DOMAIN_PORT_DDI_IO_B, wakeref); } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 027cc4cc38d9..01463c4711d3 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4492,7 +4492,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) } drm_WARN_ON(&dev_priv->drm, port > PORT_I); - dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO + + dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_IO_A + port - PORT_A; if (init_dp) { diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index fc5c091c415a..2b4c9a62613a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2161,23 +2161,23 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) { switch (port) { case PORT_A: - return POWER_DOMAIN_PORT_DDI_A_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_A; case PORT_B: - return POWER_DOMAIN_PORT_DDI_B_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_B; case PORT_C: - return POWER_DOMAIN_PORT_DDI_C_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_C; case PORT_D: - return POWER_DOMAIN_PORT_DDI_D_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_D; case PORT_E: - return POWER_DOMAIN_PORT_DDI_E_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_E; case PORT_F: - return POWER_DOMAIN_PORT_DDI_F_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_F; case PORT_G: - return POWER_DOMAIN_PORT_DDI_G_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_G; case PORT_H: - return POWER_DOMAIN_PORT_DDI_H_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_H; case PORT_I: - return POWER_DOMAIN_PORT_DDI_I_LANES; + return POWER_DOMAIN_PORT_DDI_LANES_I; default: MISSING_CASE(port); return POWER_DOMAIN_PORT_OTHER; @@ -2190,22 +2190,22 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) if (intel_tc_port_in_tbt_alt_mode(dig_port)) { switch (dig_port->aux_ch) { case AUX_CH_C: - return POWER_DOMAIN_AUX_C_TBT; + return POWER_DOMAIN_AUX_TBT_C; case AUX_CH_D: - return POWER_DOMAIN_AUX_D_TBT; + return POWER_DOMAIN_AUX_TBT_D; case AUX_CH_E: - return POWER_DOMAIN_AUX_E_TBT; 
+ return POWER_DOMAIN_AUX_TBT_E; case AUX_CH_F: - return POWER_DOMAIN_AUX_F_TBT; + return POWER_DOMAIN_AUX_TBT_F; case AUX_CH_G: - return POWER_DOMAIN_AUX_G_TBT; + return POWER_DOMAIN_AUX_TBT_G; case AUX_CH_H: - return POWER_DOMAIN_AUX_H_TBT; + return POWER_DOMAIN_AUX_TBT_H; case AUX_CH_I: - return POWER_DOMAIN_AUX_I_TBT; + return POWER_DOMAIN_AUX_TBT_I; default: MISSING_CASE(dig_port->aux_ch); - return POWER_DOMAIN_AUX_C_TBT; + return POWER_DOMAIN_AUX_TBT_C; } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index d2f2fb24ca42..166daede7205 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -44,14 +44,14 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "PIPE_C"; case POWER_DOMAIN_PIPE_D: return "PIPE_D"; - case POWER_DOMAIN_PIPE_A_PANEL_FITTER: - return "PIPE_A_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_B_PANEL_FITTER: - return "PIPE_B_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_C_PANEL_FITTER: - return "PIPE_C_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_D_PANEL_FITTER: - return "PIPE_D_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_A: + return "PIPE_PANEL_FITTER_A"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_B: + return "PIPE_PANEL_FITTER_B"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_C: + return "PIPE_PANEL_FITTER_C"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_D: + return "PIPE_PANEL_FITTER_D"; case POWER_DOMAIN_TRANSCODER_A: return "TRANSCODER_A"; case POWER_DOMAIN_TRANSCODER_B: @@ -68,42 +68,42 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "TRANSCODER_DSI_C"; case POWER_DOMAIN_TRANSCODER_VDSC_PW2: return "TRANSCODER_VDSC_PW2"; - case POWER_DOMAIN_PORT_DDI_A_LANES: - return "PORT_DDI_A_LANES"; - case POWER_DOMAIN_PORT_DDI_B_LANES: - return "PORT_DDI_B_LANES"; - case POWER_DOMAIN_PORT_DDI_C_LANES: - return "PORT_DDI_C_LANES"; - case POWER_DOMAIN_PORT_DDI_D_LANES: - return "PORT_DDI_D_LANES"; - case POWER_DOMAIN_PORT_DDI_E_LANES: - return "PORT_DDI_E_LANES"; - case POWER_DOMAIN_PORT_DDI_F_LANES: - return "PORT_DDI_F_LANES"; - case POWER_DOMAIN_PORT_DDI_G_LANES: - return "PORT_DDI_G_LANES"; - case POWER_DOMAIN_PORT_DDI_H_LANES: - return "PORT_DDI_H_LANES"; - case POWER_DOMAIN_PORT_DDI_I_LANES: - return "PORT_DDI_I_LANES"; - case POWER_DOMAIN_PORT_DDI_A_IO: - return "PORT_DDI_A_IO"; - case POWER_DOMAIN_PORT_DDI_B_IO: - return "PORT_DDI_B_IO"; - case POWER_DOMAIN_PORT_DDI_C_IO: - return "PORT_DDI_C_IO"; - case POWER_DOMAIN_PORT_DDI_D_IO: - return "PORT_DDI_D_IO"; - case POWER_DOMAIN_PORT_DDI_E_IO: - return "PORT_DDI_E_IO"; - case POWER_DOMAIN_PORT_DDI_F_IO: - return "PORT_DDI_F_IO"; - case POWER_DOMAIN_PORT_DDI_G_IO: - return "PORT_DDI_G_IO"; - case POWER_DOMAIN_PORT_DDI_H_IO: - return "PORT_DDI_H_IO"; - case POWER_DOMAIN_PORT_DDI_I_IO: - return "PORT_DDI_I_IO"; + case POWER_DOMAIN_PORT_DDI_LANES_A: + return "PORT_DDI_LANES_A"; + case POWER_DOMAIN_PORT_DDI_LANES_B: + return "PORT_DDI_LANES_B"; + case POWER_DOMAIN_PORT_DDI_LANES_C: + return "PORT_DDI_LANES_C"; + case POWER_DOMAIN_PORT_DDI_LANES_D: + return "PORT_DDI_LANES_D"; + case POWER_DOMAIN_PORT_DDI_LANES_E: + return "PORT_DDI_LANES_E"; + case POWER_DOMAIN_PORT_DDI_LANES_F: + return "PORT_DDI_LANES_F"; + case POWER_DOMAIN_PORT_DDI_LANES_G: + return "PORT_DDI_LANES_G"; + case POWER_DOMAIN_PORT_DDI_LANES_H: + return "PORT_DDI_LANES_H"; + case POWER_DOMAIN_PORT_DDI_LANES_I: + return "PORT_DDI_LANES_I"; + case POWER_DOMAIN_PORT_DDI_IO_A: + return 
"PORT_DDI_IO_A"; + case POWER_DOMAIN_PORT_DDI_IO_B: + return "PORT_DDI_IO_B"; + case POWER_DOMAIN_PORT_DDI_IO_C: + return "PORT_DDI_IO_C"; + case POWER_DOMAIN_PORT_DDI_IO_D: + return "PORT_DDI_IO_D"; + case POWER_DOMAIN_PORT_DDI_IO_E: + return "PORT_DDI_IO_E"; + case POWER_DOMAIN_PORT_DDI_IO_F: + return "PORT_DDI_IO_F"; + case POWER_DOMAIN_PORT_DDI_IO_G: + return "PORT_DDI_IO_G"; + case POWER_DOMAIN_PORT_DDI_IO_H: + return "PORT_DDI_IO_H"; + case POWER_DOMAIN_PORT_DDI_IO_I: + return "PORT_DDI_IO_I"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -136,20 +136,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_I"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; - case POWER_DOMAIN_AUX_C_TBT: - return "AUX_C_TBT"; - case POWER_DOMAIN_AUX_D_TBT: - return "AUX_D_TBT"; - case POWER_DOMAIN_AUX_E_TBT: - return "AUX_E_TBT"; - case POWER_DOMAIN_AUX_F_TBT: - return "AUX_F_TBT"; - case POWER_DOMAIN_AUX_G_TBT: - return "AUX_G_TBT"; - case POWER_DOMAIN_AUX_H_TBT: - return "AUX_H_TBT"; - case POWER_DOMAIN_AUX_I_TBT: - return "AUX_I_TBT"; + case POWER_DOMAIN_AUX_TBT_C: + return "AUX_TBT_C"; + case POWER_DOMAIN_AUX_TBT_D: + return "AUX_TBT_D"; + case POWER_DOMAIN_AUX_TBT_E: + return "AUX_TBT_E"; + case POWER_DOMAIN_AUX_TBT_F: + return "AUX_TBT_F"; + case POWER_DOMAIN_AUX_TBT_G: + return "AUX_TBT_G"; + case POWER_DOMAIN_AUX_TBT_H: + return "AUX_TBT_H"; + case POWER_DOMAIN_AUX_TBT_I: + return "AUX_TBT_I"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index e80317e7868b..5ae81e330022 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -25,10 +25,10 @@ enum intel_display_power_domain { POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, POWER_DOMAIN_PIPE_D, - POWER_DOMAIN_PIPE_A_PANEL_FITTER, - POWER_DOMAIN_PIPE_B_PANEL_FITTER, - POWER_DOMAIN_PIPE_C_PANEL_FITTER, - POWER_DOMAIN_PIPE_D_PANEL_FITTER, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_D, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, @@ -40,17 +40,17 @@ enum intel_display_power_domain { /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */ POWER_DOMAIN_TRANSCODER_VDSC_PW2, - POWER_DOMAIN_PORT_DDI_A_LANES, - POWER_DOMAIN_PORT_DDI_B_LANES, - POWER_DOMAIN_PORT_DDI_C_LANES, - POWER_DOMAIN_PORT_DDI_D_LANES, - POWER_DOMAIN_PORT_DDI_E_LANES, - POWER_DOMAIN_PORT_DDI_F_LANES, - POWER_DOMAIN_PORT_DDI_G_LANES, - POWER_DOMAIN_PORT_DDI_H_LANES, - POWER_DOMAIN_PORT_DDI_I_LANES, + POWER_DOMAIN_PORT_DDI_LANES_A, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_DDI_LANES_E, + POWER_DOMAIN_PORT_DDI_LANES_F, + POWER_DOMAIN_PORT_DDI_LANES_G, + POWER_DOMAIN_PORT_DDI_LANES_H, + POWER_DOMAIN_PORT_DDI_LANES_I, - POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_LANES_D, /* tgl+ */ POWER_DOMAIN_PORT_DDI_LANES_TC2, POWER_DOMAIN_PORT_DDI_LANES_TC3, POWER_DOMAIN_PORT_DDI_LANES_TC4, @@ -60,17 +60,17 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */ POWER_DOMAIN_PORT_DDI_LANES_E_XELPD, - POWER_DOMAIN_PORT_DDI_A_IO, - POWER_DOMAIN_PORT_DDI_B_IO, - 
POWER_DOMAIN_PORT_DDI_C_IO, - POWER_DOMAIN_PORT_DDI_D_IO, - POWER_DOMAIN_PORT_DDI_E_IO, - POWER_DOMAIN_PORT_DDI_F_IO, - POWER_DOMAIN_PORT_DDI_G_IO, - POWER_DOMAIN_PORT_DDI_H_IO, - POWER_DOMAIN_PORT_DDI_I_IO, + POWER_DOMAIN_PORT_DDI_IO_A, + POWER_DOMAIN_PORT_DDI_IO_B, + POWER_DOMAIN_PORT_DDI_IO_C, + POWER_DOMAIN_PORT_DDI_IO_D, + POWER_DOMAIN_PORT_DDI_IO_E, + POWER_DOMAIN_PORT_DDI_IO_F, + POWER_DOMAIN_PORT_DDI_IO_G, + POWER_DOMAIN_PORT_DDI_IO_H, + POWER_DOMAIN_PORT_DDI_IO_I, - POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_IO_D, /* tgl+ */ POWER_DOMAIN_PORT_DDI_IO_TC2, POWER_DOMAIN_PORT_DDI_IO_TC3, POWER_DOMAIN_PORT_DDI_IO_TC4, @@ -107,15 +107,15 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_E_XELPD, POWER_DOMAIN_AUX_IO_A, - POWER_DOMAIN_AUX_C_TBT, - POWER_DOMAIN_AUX_D_TBT, - POWER_DOMAIN_AUX_E_TBT, - POWER_DOMAIN_AUX_F_TBT, - POWER_DOMAIN_AUX_G_TBT, - POWER_DOMAIN_AUX_H_TBT, - POWER_DOMAIN_AUX_I_TBT, + POWER_DOMAIN_AUX_TBT_C, + POWER_DOMAIN_AUX_TBT_D, + POWER_DOMAIN_AUX_TBT_E, + POWER_DOMAIN_AUX_TBT_F, + POWER_DOMAIN_AUX_TBT_G, + POWER_DOMAIN_AUX_TBT_H, + POWER_DOMAIN_AUX_TBT_I, - POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */ + POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_TBT_D, /* tgl+ */ POWER_DOMAIN_AUX_TBT2, POWER_DOMAIN_AUX_TBT3, POWER_DOMAIN_AUX_TBT4, @@ -134,7 +134,7 @@ enum intel_display_power_domain { #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ - ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) + ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A) #define POWER_DOMAIN_TRANSCODER(tran) \ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ (tran) + POWER_DOMAIN_TRANSCODER_A) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index d566e638ac9b..dc5be70a1781 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -26,8 +26,8 @@ static const struct i915_power_well_desc i9xx_always_on_power_well[] = { #define I830_PIPES_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_A) | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -50,15 +50,15 @@ static const struct i915_power_well_desc i830_power_wells[] = { #define HSW_DISPLAY_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ @@ -87,14 +87,14 @@ static const 
struct i915_power_well_desc hsw_power_wells[] = { #define BDW_DISPLAY_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ @@ -125,12 +125,12 @@ static const struct i915_power_well_desc bdw_power_wells[] = { BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ BIT_ULL(POWER_DOMAIN_PIPE_A) | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ @@ -142,30 +142,30 @@ static const struct i915_power_well_desc bdw_power_wells[] = { BIT_ULL(POWER_DOMAIN_INIT)) #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -244,15 +244,15 @@ static const struct i915_power_well_desc vlv_power_wells[] = { BIT_ULL(POWER_DOMAIN_PIPE_A) | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - 
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ @@ -264,14 +264,14 @@ static const struct i915_power_well_desc vlv_power_wells[] = { BIT_ULL(POWER_DOMAIN_INIT)) #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ BIT_ULL(POWER_DOMAIN_AUX_D) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -314,15 +314,15 @@ static const struct i915_power_well_desc chv_power_wells[] = { #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ @@ -339,20 +339,20 @@ static const struct i915_power_well_desc chv_power_wells[] = { BIT_ULL(POWER_DOMAIN_INIT)) #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_A) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_B) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D) | \ BIT_ULL(POWER_DOMAIN_INIT)) static const struct i915_power_well_desc skl_power_wells[] = { @@ -437,13 +437,13 @@ static const struct i915_power_well_desc skl_power_wells[] = { #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 
BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ @@ -460,13 +460,13 @@ static const struct i915_power_well_desc skl_power_wells[] = { BIT_ULL(POWER_DOMAIN_INIT)) #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_A) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -527,13 +527,13 @@ static const struct i915_power_well_desc bxt_power_wells[] = { #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ @@ -549,22 +549,22 @@ static const struct i915_power_well_desc bxt_power_wells[] = { BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) -#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) -#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) +#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_A) +#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_B) +#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_C) #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_A) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -706,22 +706,22 @@ static const struct i915_power_well_desc glk_power_wells[] = { */ #define ICL_PW_4_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) /* VDSC/joining */ #define ICL_PW_3_POWER_DOMAINS ( \ ICL_PW_4_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ + 
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_F) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ @@ -730,10 +730,10 @@ static const struct i915_power_well_desc glk_power_wells[] = { BIT_ULL(POWER_DOMAIN_AUX_D) | \ BIT_ULL(POWER_DOMAIN_AUX_E) | \ BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT_F) | \ BIT_ULL(POWER_DOMAIN_INIT)) /* * - transcoder WD @@ -755,12 +755,12 @@ static const struct i915_power_well_desc glk_power_wells[] = { BIT_ULL(POWER_DOMAIN_DC_OFF) | \ BIT_ULL(POWER_DOMAIN_INIT)) -#define ICL_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) -#define ICL_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) -#define ICL_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) -#define ICL_DDI_IO_D_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) -#define ICL_DDI_IO_E_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) -#define ICL_DDI_IO_F_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) +#define ICL_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_A) +#define ICL_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_B) +#define ICL_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_C) +#define ICL_DDI_IO_D_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D) +#define ICL_DDI_IO_E_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E) +#define ICL_DDI_IO_F_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_F) #define ICL_AUX_A_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ @@ -771,10 +771,10 @@ static const struct i915_power_well_desc glk_power_wells[] = { #define ICL_AUX_D_TC2_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D) #define ICL_AUX_E_TC3_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E) #define ICL_AUX_F_TC4_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_F) -#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_C_TBT) -#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_TBT) -#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_TBT) -#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_F_TBT) +#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_C) +#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_D) +#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_E) +#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_F) static const struct i915_power_well_desc icl_power_wells[] = { { @@ -970,21 +970,21 @@ static const struct i915_power_well_desc icl_power_wells[] = { #define TGL_PW_5_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_D) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define TGL_PW_4_POWER_DOMAINS ( \ TGL_PW_5_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define TGL_PW_3_POWER_DOMAINS ( \ TGL_PW_4_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + 
BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ @@ -1341,14 +1341,14 @@ static const struct i915_power_well_desc tgl_power_wells[] = { #define RKL_PW_4_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define RKL_PW_3_POWER_DOMAINS ( \ RKL_PW_4_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ @@ -1504,7 +1504,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { #define DG1_PW_3_POWER_DOMAINS ( \ TGL_PW_4_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ @@ -1679,32 +1679,32 @@ static const struct i915_power_well_desc dg1_power_wells[] = { #define XELPD_PW_D_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_D) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define XELPD_PW_C_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define XELPD_PW_B_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define XELPD_PW_A_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define XELPD_PW_2_POWER_DOMAINS ( \ XELPD_PW_B_POWER_DOMAINS | \ XELPD_PW_C_POWER_DOMAINS | \ XELPD_PW_D_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ From 5e9deaaf027370de5696c1c66db12604f919b74f Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:46 +0300 Subject: [PATCH 121/219] drm/i915: Sanitize the power well names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the shortest descriptive name for all power wells for simplicity and to use the same name for the same type of power wells on multiple platforms. 
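
A minimal sketch of what the common naming buys (the two descriptor tables and the find_by_name() helper here are illustrative stand-ins, not the driver's structures): with one spelling per well type, the same name string matches the equivalent well in every platform's table.

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stripped-down descriptor: only the name matters for this sketch. */
struct power_well_desc {
	const char *name;
};

static const struct power_well_desc skl_wells[] = {
	{ .name = "PW_1" }, { .name = "PW_2" }, { .name = "DDI_IO_B" },
};

static const struct power_well_desc icl_wells[] = {
	{ .name = "PW_1" }, { .name = "PW_2" }, { .name = "DDI_IO_B" },
};

static const struct power_well_desc *
find_by_name(const struct power_well_desc *wells, size_t count,
	     const char *name)
{
	size_t i;

	for (i = 0; i < count; i++)
		if (!strcmp(wells[i].name, name))
			return &wells[i];
	return NULL;
}

int main(void)
{
	/* One query string works against both platform tables. */
	printf("skl PW_2: %s\n",
	       find_by_name(skl_wells, ARRAY_SIZE(skl_wells), "PW_2") ?
	       "found" : "missing");
	printf("icl PW_2: %s\n",
	       find_by_name(icl_wells, ARRAY_SIZE(icl_wells), "PW_2") ?
	       "found" : "missing");
	return 0;
}

Anything that matches on these names, for example when reading debug output, no longer has to cope with per-platform spellings such as "power well 2" versus "PW_2".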
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-7-imre.deak@intel.com --- .../i915/display/intel_display_power_map.c | 254 +++++++++--------- 1 file changed, 127 insertions(+), 127 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index dc5be70a1781..42b813cf47db 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -363,7 +363,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -374,7 +374,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_PW_1, }, }, { - .name = "MISC IO power well", + .name = "MISC_IO", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -384,12 +384,12 @@ static const struct i915_power_well_desc skl_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, }, }, { - .name = "DC off", + .name = "DC_off", .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 2", + .name = "PW_2", .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_vga = true, @@ -400,7 +400,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_PW_2, }, }, { - .name = "DDI A/E IO power well", + .name = "DDI_IO_A_E", .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -408,7 +408,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, }, }, { - .name = "DDI B IO power well", + .name = "DDI_IO_B", .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -416,7 +416,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_DDI_B, }, }, { - .name = "DDI C IO power well", + .name = "DDI_IO_C", .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -424,7 +424,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_DDI_C, }, }, { - .name = "DDI D IO power well", + .name = "DDI_IO_D", .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -479,7 +479,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -490,12 +490,12 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_PW_1, }, }, { - .name = "DC off", + .name = "DC_off", .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 2", + .name = "PW_2", .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_vga = true, @@ -589,7 +589,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -600,12 +600,12 @@ static const struct 
i915_power_well_desc glk_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_PW_1, }, }, { - .name = "DC off", + .name = "DC_off", .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 2", + .name = "PW_2", .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_vga = true, @@ -640,7 +640,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .bxt.phy = DPIO_PHY2, }, }, { - .name = "AUX A", + .name = "AUX_A", .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -648,7 +648,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .hsw.idx = GLK_PW_CTL_IDX_AUX_A, }, }, { - .name = "AUX B", + .name = "AUX_B", .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -656,7 +656,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .hsw.idx = GLK_PW_CTL_IDX_AUX_B, }, }, { - .name = "AUX C", + .name = "AUX_C", .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -664,7 +664,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .hsw.idx = GLK_PW_CTL_IDX_AUX_C, }, }, { - .name = "DDI A IO power well", + .name = "DDI_IO_A", .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -672,7 +672,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .hsw.idx = GLK_PW_CTL_IDX_DDI_A, }, }, { - .name = "DDI B IO power well", + .name = "DDI_IO_B", .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -680,7 +680,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .hsw.idx = SKL_PW_CTL_IDX_DDI_B, }, }, { - .name = "DDI C IO power well", + .name = "DDI_IO_C", .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, @@ -784,7 +784,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -795,12 +795,12 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_1, }, }, { - .name = "DC off", + .name = "DC_off", .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 2", + .name = "PW_2", .domains = ICL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -809,7 +809,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_2, }, }, { - .name = "power well 3", + .name = "PW_3", .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_vga = true, @@ -820,7 +820,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_3, }, }, { - .name = "DDI A IO", + .name = "DDI_IO_A", .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -828,7 +828,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_A, }, }, { - .name = "DDI B IO", + .name = "DDI_IO_B", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -836,7 +836,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_B, }, }, { - .name = "DDI C IO", + .name = "DDI_IO_C", 
.domains = ICL_DDI_IO_C_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -844,7 +844,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_C, }, }, { - .name = "DDI D IO", + .name = "DDI_IO_D", .domains = ICL_DDI_IO_D_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -852,7 +852,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_D, }, }, { - .name = "DDI E IO", + .name = "DDI_IO_E", .domains = ICL_DDI_IO_E_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -860,7 +860,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_E, }, }, { - .name = "DDI F IO", + .name = "DDI_IO_F", .domains = ICL_DDI_IO_F_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -868,7 +868,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_F, }, }, { - .name = "AUX A", + .name = "AUX_A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -876,7 +876,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_A, }, }, { - .name = "AUX B", + .name = "AUX_B", .domains = ICL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -884,7 +884,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_B, }, }, { - .name = "AUX C TC1", + .name = "AUX_C", .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -893,7 +893,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_C, }, }, { - .name = "AUX D TC2", + .name = "AUX_D", .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -902,7 +902,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_D, }, }, { - .name = "AUX E TC3", + .name = "AUX_E", .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -911,7 +911,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_E, }, }, { - .name = "AUX F TC4", + .name = "AUX_F", .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -920,7 +920,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_F, }, }, { - .name = "AUX C TBT1", + .name = "AUX_TBT1", .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -929,7 +929,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, }, }, { - .name = "AUX D TBT2", + .name = "AUX_TBT2", .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -938,7 +938,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, }, }, { - .name = "AUX E TBT3", + .name = "AUX_TBT3", .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -947,7 +947,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, }, }, { - .name = "AUX F TBT4", + .name = "AUX_TBT4", .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -956,7 +956,7 @@ static const struct 
i915_power_well_desc icl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, }, }, { - .name = "power well 4", + .name = "PW_4", .domains = ICL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), @@ -1072,7 +1072,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -1083,12 +1083,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_1, }, }, { - .name = "DC off", + .name = "DC_off", .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 2", + .name = "PW_2", .domains = TGL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -1097,7 +1097,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_2, }, }, { - .name = "power well 3", + .name = "PW_3", .domains = TGL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_vga = true, @@ -1108,7 +1108,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_3, }, }, { - .name = "DDI A IO", + .name = "DDI_IO_A", .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1116,7 +1116,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_A, } }, { - .name = "DDI B IO", + .name = "DDI_IO_B", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1124,7 +1124,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_B, } }, { - .name = "DDI C IO", + .name = "DDI_IO_C", .domains = ICL_DDI_IO_C_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1132,7 +1132,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_C, } }, { - .name = "DDI IO TC1", + .name = "DDI_IO_TC1", .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1140,7 +1140,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, }, }, { - .name = "DDI IO TC2", + .name = "DDI_IO_TC2", .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1148,7 +1148,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, }, }, { - .name = "DDI IO TC3", + .name = "DDI_IO_TC3", .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1156,7 +1156,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, }, }, { - .name = "DDI IO TC4", + .name = "DDI_IO_TC4", .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1164,7 +1164,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, }, }, { - .name = "DDI IO TC5", + .name = "DDI_IO_TC5", .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1172,7 +1172,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, }, }, { - .name = "DDI IO TC6", + .name = "DDI_IO_TC6", .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ 
-1180,12 +1180,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, }, }, { - .name = "TC cold off", + .name = "TC_cold_off", .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, .ops = &tgl_tc_cold_off_ops, .id = TGL_DISP_PW_TC_COLD_OFF, }, { - .name = "AUX A", + .name = "AUX_A", .domains = TGL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1193,7 +1193,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_A, }, }, { - .name = "AUX B", + .name = "AUX_B", .domains = TGL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1201,7 +1201,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_B, }, }, { - .name = "AUX C", + .name = "AUX_C", .domains = TGL_AUX_C_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1209,7 +1209,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_C, }, }, { - .name = "AUX USBC1", + .name = "AUX_USBC1", .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1218,7 +1218,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, }, }, { - .name = "AUX USBC2", + .name = "AUX_USBC2", .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1227,7 +1227,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, }, }, { - .name = "AUX USBC3", + .name = "AUX_USBC3", .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1236,7 +1236,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, }, }, { - .name = "AUX USBC4", + .name = "AUX_USBC4", .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1245,7 +1245,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, }, }, { - .name = "AUX USBC5", + .name = "AUX_USBC5", .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1254,7 +1254,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, }, }, { - .name = "AUX USBC6", + .name = "AUX_USBC6", .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1263,7 +1263,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, }, }, { - .name = "AUX TBT1", + .name = "AUX_TBT1", .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -1272,7 +1272,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, }, }, { - .name = "AUX TBT2", + .name = "AUX_TBT2", .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -1281,7 +1281,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, }, }, { - .name = "AUX TBT3", + .name = "AUX_TBT3", .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -1290,7 +1290,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, }, }, { - .name = "AUX TBT4", + .name = "AUX_TBT4", .domains = 
TGL_AUX_IO_TBT4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -1299,7 +1299,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, }, }, { - .name = "AUX TBT5", + .name = "AUX_TBT5", .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -1308,7 +1308,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, }, }, { - .name = "AUX TBT6", + .name = "AUX_TBT6", .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -1317,7 +1317,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, }, }, { - .name = "power well 4", + .name = "PW_4", .domains = TGL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -1327,7 +1327,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_4, } }, { - .name = "power well 5", + .name = "PW_5", .domains = TGL_PW_5_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -1395,7 +1395,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -1406,12 +1406,12 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_1, }, }, { - .name = "DC off", + .name = "DC_off", .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 3", + .name = "PW_3", .domains = RKL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), @@ -1422,7 +1422,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_3, }, }, { - .name = "power well 4", + .name = "PW_4", .domains = RKL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -1432,7 +1432,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_4, } }, { - .name = "DDI A IO", + .name = "DDI_IO_A", .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1440,7 +1440,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_A, } }, { - .name = "DDI B IO", + .name = "DDI_IO_B", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1448,7 +1448,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_B, } }, { - .name = "DDI IO TC1", + .name = "DDI_IO_TC1", .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1456,7 +1456,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, }, }, { - .name = "DDI IO TC2", + .name = "DDI_IO_TC2", .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1464,7 +1464,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, }, }, { - .name = "AUX A", + .name = "AUX_A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1472,7 +1472,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_A, }, }, { - .name = "AUX B", + .name = "AUX_B", .domains = 
ICL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1480,7 +1480,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_B, }, }, { - .name = "AUX USBC1", + .name = "AUX_USBC1", .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1488,7 +1488,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, }, }, { - .name = "AUX USBC2", + .name = "AUX_USBC2", .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1535,7 +1535,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -1546,12 +1546,12 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_1, }, }, { - .name = "DC off", + .name = "DC_off", .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 2", + .name = "PW_2", .domains = DG1_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -1560,7 +1560,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_2, }, }, { - .name = "power well 3", + .name = "PW_3", .domains = DG1_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), @@ -1571,7 +1571,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_3, }, }, { - .name = "DDI A IO", + .name = "DDI_IO_A", .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1579,7 +1579,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_A, } }, { - .name = "DDI B IO", + .name = "DDI_IO_B", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1587,7 +1587,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_B, } }, { - .name = "DDI IO TC1", + .name = "DDI_IO_TC1", .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1595,7 +1595,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, }, }, { - .name = "DDI IO TC2", + .name = "DDI_IO_TC2", .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1603,7 +1603,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, }, }, { - .name = "AUX A", + .name = "AUX_A", .domains = TGL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1611,7 +1611,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_A, }, }, { - .name = "AUX B", + .name = "AUX_B", .domains = TGL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1619,7 +1619,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_B, }, }, { - .name = "AUX USBC1", + .name = "AUX_USBC1", .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1628,7 +1628,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, }, }, { - .name = "AUX USBC2", + .name = 
"AUX_USBC2", .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, @@ -1637,7 +1637,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, }, }, { - .name = "power well 4", + .name = "PW_4", .domains = TGL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -1647,7 +1647,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_4, } }, { - .name = "power well 5", + .name = "PW_5", .domains = TGL_PW_5_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_fuses = true, @@ -1779,7 +1779,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .always_on = true, .id = DISP_PW_ID_NONE, }, { - .name = "power well 1", + .name = "PW_1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, @@ -1790,12 +1790,12 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_1, }, }, { - .name = "DC off", + .name = "DC_off", .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { - .name = "power well 2", + .name = "PW_2", .domains = XELPD_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .has_vga = true, @@ -1805,7 +1805,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_PW_2, }, }, { - .name = "power well A", + .name = "PW_A", .domains = XELPD_PW_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_A), @@ -1815,7 +1815,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_PW_A, }, }, { - .name = "power well B", + .name = "PW_B", .domains = XELPD_PW_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), @@ -1825,7 +1825,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_PW_B, }, }, { - .name = "power well C", + .name = "PW_C", .domains = XELPD_PW_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), @@ -1835,7 +1835,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_PW_C, }, }, { - .name = "power well D", + .name = "PW_D", .domains = XELPD_PW_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_D), @@ -1845,7 +1845,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_PW_D, }, }, { - .name = "DDI A IO", + .name = "DDI_IO_A", .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1853,7 +1853,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_A, } }, { - .name = "DDI B IO", + .name = "DDI_IO_B", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1861,7 +1861,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_B, } }, { - .name = "DDI C IO", + .name = "DDI_IO_C", .domains = ICL_DDI_IO_C_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1869,7 +1869,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_DDI_C, } }, { - .name = "DDI IO D_XELPD", + .name = "DDI_IO_D_XELPD", .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1877,7 +1877,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, } }, { - 
.name = "DDI IO E_XELPD", + .name = "DDI_IO_E_XELPD", .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1885,7 +1885,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, } }, { - .name = "DDI IO TC1", + .name = "DDI_IO_TC1", .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1893,7 +1893,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, } }, { - .name = "DDI IO TC2", + .name = "DDI_IO_TC2", .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1901,7 +1901,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, } }, { - .name = "DDI IO TC3", + .name = "DDI_IO_TC3", .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1909,7 +1909,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, } }, { - .name = "DDI IO TC4", + .name = "DDI_IO_TC4", .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1917,7 +1917,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, } }, { - .name = "AUX A", + .name = "AUX_A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, @@ -1926,7 +1926,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_A, }, }, { - .name = "AUX B", + .name = "AUX_B", .domains = ICL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, @@ -1935,7 +1935,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_B, }, }, { - .name = "AUX C", + .name = "AUX_C", .domains = TGL_AUX_C_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, @@ -1944,7 +1944,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = ICL_PW_CTL_IDX_AUX_C, }, }, { - .name = "AUX D_XELPD", + .name = "AUX_D_XELPD", .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, @@ -1953,7 +1953,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, }, }, { - .name = "AUX E_XELPD", + .name = "AUX_E_XELPD", .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1961,7 +1961,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, }, }, { - .name = "AUX USBC1", + .name = "AUX_USBC1", .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, @@ -1970,7 +1970,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, }, }, { - .name = "AUX USBC2", + .name = "AUX_USBC2", .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1978,7 +1978,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, }, }, { - .name = "AUX USBC3", + .name = "AUX_USBC3", .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1986,7 +1986,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = 
TGL_PW_CTL_IDX_AUX_TC3, }, }, { - .name = "AUX USBC4", + .name = "AUX_USBC4", .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, @@ -1994,7 +1994,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, }, }, { - .name = "AUX TBT1", + .name = "AUX_TBT1", .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -2003,7 +2003,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, }, }, { - .name = "AUX TBT2", + .name = "AUX_TBT2", .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -2012,7 +2012,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, }, }, { - .name = "AUX TBT3", + .name = "AUX_TBT3", .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, @@ -2021,7 +2021,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, }, }, { - .name = "AUX TBT4", + .name = "AUX_TBT4", .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, From c32ffce42aa59d054c93b2d63a3b11521dd7490b Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:47 +0300 Subject: [PATCH 122/219] drm/i915: Convert the power well descriptor domain mask to an array of domains MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The next patch converts the i915_power_well_desc::domain mask from a u64 mask to a bitmap. I didn't find a reasonably simple way to initialize bitmaps statically, so prepare for the next patch here by converting the masks to an array of domain enums and initing the masks from these arrays during module loading. v2: Clarify list vs. array in the commit message. 
(Jani) Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-8-imre.deak@intel.com --- .../drm/i915/display/intel_display_power.c | 4 +- .../i915/display/intel_display_power_map.c | 1403 +++++++++-------- .../i915/display/intel_display_power_well.c | 2 +- .../i915/display/intel_display_power_well.h | 6 +- 4 files changed, 742 insertions(+), 673 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 166daede7205..791b4e9f9834 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -24,11 +24,11 @@ #define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ for_each_power_well(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) + for_each_if((__power_well)->domains & (__domain_mask)) #define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ for_each_power_well_reverse(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) + for_each_if((__power_well)->domains & (__domain_mask)) const char * intel_display_power_domain_str(enum intel_display_power_domain domain) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 42b813cf47db..b7aa13d6a33f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -11,70 +11,90 @@ #include "intel_display_power_map.h" #include "intel_display_power_well.h" -#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) +#define __LIST_INLINE_ELEMS(__elem_type, ...) \ + ((__elem_type[]) { __VA_ARGS__ }) + +#define __LIST(__elems) { \ + .list = __elems, \ + .count = ARRAY_SIZE(__elems), \ +} + +#define I915_PW_DOMAINS(...) \ + (const struct i915_power_domain_list) \ + __LIST(__LIST_INLINE_ELEMS(enum intel_display_power_domain, __VA_ARGS__)) + +#define I915_DECL_PW_DOMAINS(__name, ...) \ + static const struct i915_power_domain_list __name = I915_PW_DOMAINS(__VA_ARGS__) + +/* Zero-length list assigns all power domains, a NULL list assigns none. 
*/ +#define I915_PW_DOMAINS_NONE NULL +#define I915_PW_DOMAINS_ALL /* zero-length list */ + + +I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL); static const struct i915_power_well_desc i9xx_always_on_power_well[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, }; -#define I830_PIPES_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(i830_pwdoms_pipes, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc i830_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "pipes", - .domains = I830_PIPES_POWER_DOMAINS, + .domain_list = &i830_pwdoms_pipes, .ops = &i830_pipes_power_well_ops, .id = DISP_PW_ID_NONE, }, }; -#define HSW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(hsw_pwdoms_display, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_CRT, /* DDI E */ + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc hsw_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "display", - .domains = HSW_DISPLAY_POWER_DOMAINS, + .domain_list = &hsw_pwdoms_display, .ops = &hsw_power_well_ops, .has_vga = true, .id = HSW_DISP_PW_GLOBAL, @@ -84,33 +104,33 @@ static const struct i915_power_well_desc hsw_power_wells[] = { }, }; -#define BDW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - 
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(bdw_pwdoms_display, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_CRT, /* DDI E */ + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc bdw_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "display", - .domains = BDW_DISPLAY_POWER_DOMAINS, + .domain_list = &bdw_pwdoms_display, .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), @@ -121,64 +141,51 @@ static const struct i915_power_well_desc bdw_power_wells[] = { }, }; -#define VLV_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(vlv_pwdoms_display, + POWER_DOMAIN_DISPLAY_CORE, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_PORT_CRT, + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_INIT); -#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_cmn_bc, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_CRT, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); -#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | 
\ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc vlv_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "display", - .domains = VLV_DISPLAY_POWER_DOMAINS, + .domain_list = &vlv_pwdoms_display, .ops = &vlv_display_power_well_ops, .id = VLV_DISP_PW_DISP2D, { @@ -186,10 +193,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { }, }, { .name = "dpio-tx-b-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, .ops = &vlv_dpio_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -197,10 +201,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { }, }, { .name = "dpio-tx-b-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, .ops = &vlv_dpio_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -208,10 +209,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { }, }, { .name = "dpio-tx-c-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, .ops = &vlv_dpio_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -219,10 +217,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { }, }, { .name = "dpio-tx-c-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, .ops = &vlv_dpio_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -230,7 +225,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { }, }, { .name = "dpio-common", - .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, + .domain_list = &vlv_pwdoms_dpio_cmn_bc, .ops = &vlv_dpio_cmn_power_well_ops, .id = VLV_DISP_PW_DPIO_CMN_BC, { @@ -239,46 +234,46 @@ static const struct i915_power_well_desc vlv_power_wells[] = { }, }; -#define CHV_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(chv_pwdoms_display, + 
POWER_DOMAIN_DISPLAY_CORE, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_AUX_D, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_INIT); -#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_bc, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); -#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_AUX_D, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc chv_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, @@ -289,12 +284,12 @@ static const struct i915_power_well_desc chv_power_wells[] = { * power wells don't actually exist. Pipe A power well is * required for any pipe to work. */ - .domains = CHV_DISPLAY_POWER_DOMAINS, + .domain_list = &chv_pwdoms_display, .ops = &chv_pipe_power_well_ops, .id = DISP_PW_ID_NONE, }, { .name = "dpio-common-bc", - .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, + .domain_list = &chv_pwdoms_dpio_cmn_bc, .ops = &chv_dpio_cmn_power_well_ops, .id = VLV_DISP_PW_DPIO_CMN_BC, { @@ -302,7 +297,7 @@ static const struct i915_power_well_desc chv_power_wells[] = { }, }, { .name = "dpio-common-d", - .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, + .domain_list = &chv_pwdoms_dpio_cmn_d, .ops = &chv_dpio_cmn_power_well_ops, .id = CHV_DISP_PW_DPIO_CMN_D, { @@ -311,61 +306,64 @@ static const struct i915_power_well_desc chv_power_wells[] = { }, }; -#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define SKL_PW_2_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ + 
POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C, \ + POWER_DOMAIN_AUX_D -#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(skl_pwdoms_pw_2, + SKL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_A) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(skl_pwdoms_dc_off, + SKL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_INIT); -#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_a_e, + POWER_DOMAIN_PORT_DDI_IO_A, + POWER_DOMAIN_PORT_DDI_IO_E, + POWER_DOMAIN_INIT); -#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_b, + POWER_DOMAIN_PORT_DDI_IO_B, + POWER_DOMAIN_INIT); -#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_c, + POWER_DOMAIN_PORT_DDI_IO_C, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d, + POWER_DOMAIN_PORT_DDI_IO_D, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc skl_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -376,7 +374,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, { .name = "MISC_IO", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .id = SKL_DISP_PW_MISC_IO, @@ -385,12 +383,12 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, }, { .name = "DC_off", - .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &skl_pwdoms_dc_off, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_2", - .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .domain_list = &skl_pwdoms_pw_2, .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), @@ -401,7 +399,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, }, { .name = "DDI_IO_A_E", - .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, + .domain_list = &skl_pwdoms_ddi_io_a_e, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -409,7 +407,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, }, { .name = "DDI_IO_B", - .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, + .domain_list = &skl_pwdoms_ddi_io_b, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -417,7 +415,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, }, { .name = "DDI_IO_C", - .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, + .domain_list = &skl_pwdoms_ddi_io_c, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -425,7 +423,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, }, { .name 
= "DDI_IO_D", - .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, + .domain_list = &skl_pwdoms_ddi_io_d, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -434,54 +432,57 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, }; -#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define BXT_PW_2_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C -#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(bxt_pwdoms_pw_2, + BXT_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(bxt_pwdoms_dc_off, + BXT_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_INIT); -#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_a, + POWER_DOMAIN_PORT_DDI_LANES_A, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc bxt_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -491,12 +492,12 @@ static const struct i915_power_well_desc bxt_power_wells[] = { }, }, { .name = "DC_off", - .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &bxt_pwdoms_dc_off, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_2", - .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .domain_list = &bxt_pwdoms_pw_2, .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), @@ -507,7 +508,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { }, }, { .name = "dpio-common-a", - .domains = 
BXT_DPIO_CMN_A_POWER_DOMAINS, + .domain_list = &bxt_pwdoms_dpio_cmn_a, .ops = &bxt_dpio_cmn_power_well_ops, .id = BXT_DISP_PW_DPIO_CMN_A, { @@ -515,7 +516,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { }, }, { .name = "dpio-common-bc", - .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, + .domain_list = &bxt_pwdoms_dpio_cmn_bc, .ops = &bxt_dpio_cmn_power_well_ops, .id = VLV_DISP_PW_DPIO_CMN_BC, { @@ -524,74 +525,77 @@ static const struct i915_power_well_desc bxt_power_wells[] = { }, }; -#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_PW_2_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C -#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(glk_pwdoms_pw_2, + GLK_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_A) -#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_B) -#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_C) +I915_DECL_PW_DOMAINS(glk_pwdoms_dc_off, + GLK_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_INIT); -#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A); +I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B); +I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C); -#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_a, + POWER_DOMAIN_PORT_DDI_LANES_A, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_INIT); -#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_b, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_INIT); -#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_c, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); -#define 
GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(glk_pwdoms_aux_a, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_IO_A, + POWER_DOMAIN_INIT); -#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(glk_pwdoms_aux_b, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc glk_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -601,12 +605,12 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "DC_off", - .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &glk_pwdoms_dc_off, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_2", - .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .domain_list = &glk_pwdoms_pw_2, .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), @@ -617,7 +621,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "dpio-common-a", - .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, + .domain_list = &glk_pwdoms_dpio_cmn_a, .ops = &bxt_dpio_cmn_power_well_ops, .id = BXT_DISP_PW_DPIO_CMN_A, { @@ -625,7 +629,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "dpio-common-b", - .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, + .domain_list = &glk_pwdoms_dpio_cmn_b, .ops = &bxt_dpio_cmn_power_well_ops, .id = VLV_DISP_PW_DPIO_CMN_BC, { @@ -633,7 +637,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "dpio-common-c", - .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, + .domain_list = &glk_pwdoms_dpio_cmn_c, .ops = &bxt_dpio_cmn_power_well_ops, .id = GLK_DISP_PW_DPIO_CMN_C, { @@ -641,7 +645,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "AUX_A", - .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, + .domain_list = &glk_pwdoms_aux_a, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -649,7 +653,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "AUX_B", - .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, + .domain_list = &glk_pwdoms_aux_b, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -657,7 +661,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "AUX_C", - .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, + .domain_list = &glk_pwdoms_aux_c, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -665,7 +669,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "DDI_IO_A", - .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, + .domain_list = &glk_pwdoms_ddi_io_a, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -673,7 +677,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "DDI_IO_B", - .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, + .domain_list = &glk_pwdoms_ddi_io_b, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -681,7 +685,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { }, }, { .name = "DDI_IO_C", - .domains = 
GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, + .domain_list = &glk_pwdoms_ddi_io_c, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -704,89 +708,97 @@ static const struct i915_power_well_desc glk_power_wells[] = { * - DDI_A * - FBC */ -#define ICL_PW_4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define ICL_PW_4_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C + +I915_DECL_PW_DOMAINS(icl_pwdoms_pw_4, + ICL_PW_4_POWER_DOMAINS, + POWER_DOMAIN_INIT); /* VDSC/joining */ -#define ICL_PW_3_POWER_DOMAINS ( \ - ICL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_F) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_E) | \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT_E) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT_F) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define ICL_PW_3_POWER_DOMAINS \ + ICL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ + POWER_DOMAIN_PORT_DDI_LANES_F, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C, \ + POWER_DOMAIN_AUX_D, \ + POWER_DOMAIN_AUX_E, \ + POWER_DOMAIN_AUX_F, \ + POWER_DOMAIN_AUX_TBT_C, \ + POWER_DOMAIN_AUX_TBT_D, \ + POWER_DOMAIN_AUX_TBT_E, \ + POWER_DOMAIN_AUX_TBT_F + +I915_DECL_PW_DOMAINS(icl_pwdoms_pw_3, + ICL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); /* * - transcoder WD * - KVMR (HW control) */ -#define ICL_PW_2_POWER_DOMAINS ( \ - ICL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define ICL_PW_2_POWER_DOMAINS \ + ICL_PW_3_POWER_DOMAINS, \ + POWER_DOMAIN_TRANSCODER_VDSC_PW2 + +I915_DECL_PW_DOMAINS(icl_pwdoms_pw_2, + ICL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); /* * - KVMR (HW control) */ -#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - ICL_PW_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_DC_OFF) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(icl_pwdoms_dc_off, + ICL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_DC_OFF, + POWER_DOMAIN_INIT); -#define ICL_DDI_IO_A_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_A) -#define ICL_DDI_IO_B_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_B) -#define ICL_DDI_IO_C_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_C) -#define ICL_DDI_IO_D_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D) -#define ICL_DDI_IO_E_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E) -#define ICL_DDI_IO_F_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_F) 
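The commit message of this patch describes the technique visible in the hunks above and below: the BIT_ULL()-based domain mask macros are replaced by statically declared lists of domain enum values (compound literals), and the runtime mask/bitmap is rebuilt from each list when the power wells are instantiated at module load. The stand-alone sketch below is illustrative only; every name in it (example_power_domain, EXAMPLE_DECL_DOMAINS, example_domains_to_mask, and so on) is made up and simplified, not the driver's actual API, but it follows the same compound-literal pattern and the init-time conversion.

/*
 * Minimal sketch, assuming simplified, hypothetical names: a domain set is
 * declared statically as an array of enum values, then folded into a mask
 * at init time. The real driver does the equivalent with
 * I915_DECL_PW_DOMAINS() and its power-well setup code.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum example_power_domain {
        EXAMPLE_DOMAIN_PIPE_A,
        EXAMPLE_DOMAIN_PIPE_B,
        EXAMPLE_DOMAIN_AUX_A,
        EXAMPLE_DOMAIN_INIT,
        EXAMPLE_DOMAIN_NUM,
};

struct example_domain_list {
        const enum example_power_domain *list;
        size_t count;
};

/* Compound-literal helpers, analogous in spirit to I915_PW_DOMAINS(). */
#define EXAMPLE_DOMAINS(...) \
        ((const enum example_power_domain[]) { __VA_ARGS__ })

#define EXAMPLE_DECL_DOMAINS(name, ...)                                 \
        static const struct example_domain_list name = {               \
                .list = EXAMPLE_DOMAINS(__VA_ARGS__),                   \
                .count = sizeof(EXAMPLE_DOMAINS(__VA_ARGS__)) /         \
                         sizeof(enum example_power_domain),             \
        }

/* Easy to write statically, unlike a bitmap initializer. */
EXAMPLE_DECL_DOMAINS(example_pwdoms_pw_a,
                     EXAMPLE_DOMAIN_PIPE_A,
                     EXAMPLE_DOMAIN_AUX_A,
                     EXAMPLE_DOMAIN_INIT);

/* At module-load time the static list is folded into a runtime mask. */
static uint64_t example_domains_to_mask(const struct example_domain_list *l)
{
        uint64_t mask = 0;
        size_t i;

        for (i = 0; i < l->count; i++)
                mask |= UINT64_C(1) << l->list[i];
        return mask;
}

int main(void)
{
        printf("pw_a mask = %#llx\n",
               (unsigned long long)example_domains_to_mask(&example_pwdoms_pw_a));
        return 0;
}

The sketch only shows why a list of enums is easier to declare statically than a bitmap; the driver itself keeps the list declarations in intel_display_power_map.c and performs the conversion during power-well setup, as the surrounding hunks show.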
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A); +I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B); +I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C); +I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D); +I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_e, POWER_DOMAIN_PORT_DDI_IO_E); +I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_f, POWER_DOMAIN_PORT_DDI_IO_F); -#define ICL_AUX_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A)) - -#define ICL_AUX_B_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_B) -#define ICL_AUX_C_TC1_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_C) -#define ICL_AUX_D_TC2_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D) -#define ICL_AUX_E_TC3_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E) -#define ICL_AUX_F_TC4_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_F) -#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_C) -#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_D) -#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_E) -#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT_F) +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_a, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_IO_A); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_b, POWER_DOMAIN_AUX_B); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_c, POWER_DOMAIN_AUX_C); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_d, POWER_DOMAIN_AUX_D); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_e, POWER_DOMAIN_AUX_E); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_f, POWER_DOMAIN_AUX_F); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT_C); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT_D); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT_E); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT_F); static const struct i915_power_well_desc icl_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -796,12 +808,12 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "DC_off", - .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &icl_pwdoms_dc_off, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_2", - .domains = ICL_PW_2_POWER_DOMAINS, + .domain_list = &icl_pwdoms_pw_2, .ops = &hsw_power_well_ops, .has_fuses = true, .id = SKL_DISP_PW_2, @@ -810,7 +822,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "PW_3", - .domains = ICL_PW_3_POWER_DOMAINS, + .domain_list = &icl_pwdoms_pw_3, .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B), @@ -821,7 +833,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "DDI_IO_A", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_a, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -829,7 +841,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "DDI_IO_B", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_b, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -837,7 +849,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "DDI_IO_C", - .domains = 
ICL_DDI_IO_C_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_c, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -845,7 +857,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "DDI_IO_D", - .domains = ICL_DDI_IO_D_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_d, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -853,7 +865,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "DDI_IO_E", - .domains = ICL_DDI_IO_E_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_e, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -861,7 +873,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "DDI_IO_F", - .domains = ICL_DDI_IO_F_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_f, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -869,7 +881,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_a, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -877,7 +889,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_b, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -885,7 +897,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_C", - .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_c, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -894,7 +906,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_D", - .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_d, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -903,7 +915,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_E", - .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_e, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -912,7 +924,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_F", - .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_f, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -921,7 +933,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_TBT1", - .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_tbt1, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -930,7 +942,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_TBT2", - .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_tbt2, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -939,7 +951,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_TBT3", - .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_tbt3, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -948,7 +960,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { .name = "AUX_TBT4", - .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_tbt4, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -957,7 +969,7 @@ static const struct i915_power_well_desc 
icl_power_wells[] = { }, }, { .name = "PW_4", - .domains = ICL_PW_4_POWER_DOMAINS, + .domain_list = &icl_pwdoms_pw_4, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, @@ -968,113 +980,122 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; -#define TGL_PW_5_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_D) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define TGL_PW_5_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_D, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_D, \ + POWER_DOMAIN_TRANSCODER_D -#define TGL_PW_4_POWER_DOMAINS ( \ - TGL_PW_5_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_5, + TGL_PW_5_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define TGL_PW_3_POWER_DOMAINS ( \ - TGL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define TGL_PW_4_POWER_DOMAINS \ + TGL_PW_5_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_C -#define TGL_PW_2_POWER_DOMAINS ( \ - TGL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_4, + TGL_PW_4_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - TGL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define TGL_PW_3_POWER_DOMAINS \ + TGL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4, \ + POWER_DOMAIN_PORT_DDI_LANES_TC5, \ + POWER_DOMAIN_PORT_DDI_LANES_TC6, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2, \ + POWER_DOMAIN_AUX_USBC3, \ + POWER_DOMAIN_AUX_USBC4, \ + POWER_DOMAIN_AUX_USBC5, \ + POWER_DOMAIN_AUX_USBC6, \ + POWER_DOMAIN_AUX_TBT1, \ + POWER_DOMAIN_AUX_TBT2, \ + POWER_DOMAIN_AUX_TBT3, \ + POWER_DOMAIN_AUX_TBT4, \ + POWER_DOMAIN_AUX_TBT5, \ + POWER_DOMAIN_AUX_TBT6 -#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) -#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) -#define 
TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) -#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) -#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) -#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_3, + TGL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define TGL_AUX_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A)) -#define TGL_AUX_B_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_B) -#define TGL_AUX_C_IO_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_C) +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_2, + TGL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_TRANSCODER_VDSC_PW2, + POWER_DOMAIN_INIT); -#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) -#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) -#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) -#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) -#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) -#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) +I915_DECL_PW_DOMAINS(tgl_pwdoms_dc_off, + TGL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); -#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) -#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) -#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) -#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) -#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) -#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc5, POWER_DOMAIN_PORT_DDI_IO_TC5); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc6, POWER_DOMAIN_PORT_DDI_IO_TC6); -#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ - BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_a, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_IO_A); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_b, POWER_DOMAIN_AUX_B); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_c, POWER_DOMAIN_AUX_C); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc5, POWER_DOMAIN_AUX_USBC5); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc6, POWER_DOMAIN_AUX_USBC6); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2); 
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt5, POWER_DOMAIN_AUX_TBT5); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt6, POWER_DOMAIN_AUX_TBT6); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off, + POWER_DOMAIN_AUX_USBC1, + POWER_DOMAIN_AUX_USBC2, + POWER_DOMAIN_AUX_USBC3, + POWER_DOMAIN_AUX_USBC4, + POWER_DOMAIN_AUX_USBC5, + POWER_DOMAIN_AUX_USBC6, + POWER_DOMAIN_AUX_TBT1, + POWER_DOMAIN_AUX_TBT2, + POWER_DOMAIN_AUX_TBT3, + POWER_DOMAIN_AUX_TBT4, + POWER_DOMAIN_AUX_TBT5, + POWER_DOMAIN_AUX_TBT6, + POWER_DOMAIN_TC_COLD_OFF); static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -1084,12 +1105,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "DC_off", - .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_dc_off, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_2", - .domains = TGL_PW_2_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_pw_2, .ops = &hsw_power_well_ops, .has_fuses = true, .id = SKL_DISP_PW_2, @@ -1098,7 +1119,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "PW_3", - .domains = TGL_PW_3_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_pw_3, .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B), @@ -1109,7 +1130,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "DDI_IO_A", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_a, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1117,7 +1138,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { } }, { .name = "DDI_IO_B", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_b, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1125,7 +1146,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { } }, { .name = "DDI_IO_C", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_c, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1133,7 +1154,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { } }, { .name = "DDI_IO_TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc1, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1141,7 +1162,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "DDI_IO_TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc2, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1149,7 +1170,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "DDI_IO_TC3", - .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc3, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1157,7 +1178,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "DDI_IO_TC4", - .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc4, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1165,7 +1186,7 @@ static const struct 
i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "DDI_IO_TC5", - .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc5, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1173,7 +1194,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "DDI_IO_TC6", - .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc6, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1181,12 +1202,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "TC_cold_off", - .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_tc_cold_off, .ops = &tgl_tc_cold_off_ops, .id = TGL_DISP_PW_TC_COLD_OFF, }, { .name = "AUX_A", - .domains = TGL_AUX_A_IO_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_a, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1194,7 +1215,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_B", - .domains = TGL_AUX_B_IO_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_b, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1202,7 +1223,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_C", - .domains = TGL_AUX_C_IO_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_c, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1210,7 +1231,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc1, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1219,7 +1240,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc2, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1228,7 +1249,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_USBC3", - .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc3, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1237,7 +1258,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_USBC4", - .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc4, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1246,7 +1267,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_USBC5", - .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc5, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1255,7 +1276,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_USBC6", - .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc6, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1264,7 +1285,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_TBT1", - .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_tbt1, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -1273,7 +1294,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_TBT2", - .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_tbt2, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = 
DISP_PW_ID_NONE, @@ -1282,7 +1303,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_TBT3", - .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_tbt3, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -1291,7 +1312,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_TBT4", - .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_tbt4, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -1300,7 +1321,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_TBT5", - .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_tbt5, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -1309,7 +1330,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "AUX_TBT6", - .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_tbt6, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -1318,7 +1339,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { .name = "PW_4", - .domains = TGL_PW_4_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_pw_4, .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), @@ -1328,7 +1349,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { } }, { .name = "PW_5", - .domains = TGL_PW_5_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_pw_5, .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_D), @@ -1339,25 +1360,31 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }; -#define RKL_PW_4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define RKL_PW_4_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_C -#define RKL_PW_3_POWER_DOMAINS ( \ - RKL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_4, + RKL_PW_4_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +#define RKL_PW_3_POWER_DOMAINS \ + RKL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2 + +I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_3, + RKL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); /* * There is no PW_2/PG_2 on RKL. 
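The hunks around this point replace open-coded BIT_ULL() domain masks with named lists declared via I915_DECL_PW_DOMAINS(). The macro body itself is not visible in this excerpt, so the standalone C sketch below only illustrates, under that assumption, the general shape such a helper can take: an array of domain values paired with a {pointer, count} descriptor (mirroring the i915_power_domain_list struct this patch adds), which init_power_well_domains() can later fold into a u64 mask. Every name carrying a _sketch suffix is an illustrative stand-in, not i915 code.

#include <stdio.h>

/* Illustrative stand-ins for the real POWER_DOMAIN_* enum values. */
enum power_domain_sketch {
	DOMAIN_PIPE_B_SKETCH,
	DOMAIN_AUX_USBC1_SKETCH,
	DOMAIN_INIT_SKETCH,
};

/* Mirrors the {pointer, count} shape of i915_power_domain_list. */
struct power_domain_list_sketch {
	const enum power_domain_sketch *list;
	unsigned char count;
};

#define ARRAY_SIZE_SKETCH(a) (sizeof(a) / sizeof((a)[0]))

/*
 * Hypothetical declaration helper: the variadic arguments (including any
 * nested *_POWER_DOMAINS-style macros) expand into one flat array, and the
 * count is computed by the compiler instead of being maintained by hand.
 */
#define DECL_PW_DOMAINS_SKETCH(name, ...)				\
	static const enum power_domain_sketch name##_array[] = { __VA_ARGS__ }; \
	static const struct power_domain_list_sketch name = {		\
		.list = name##_array,					\
		.count = ARRAY_SIZE_SKETCH(name##_array),		\
	}

#define PW_4_DOMAINS_SKETCH \
	DOMAIN_PIPE_B_SKETCH, \
	DOMAIN_AUX_USBC1_SKETCH

DECL_PW_DOMAINS_SKETCH(pwdoms_pw_4_sketch,
		       PW_4_DOMAINS_SKETCH,
		       DOMAIN_INIT_SKETCH);

int main(void)
{
	/* Fold the list into a u64-style mask, as init_power_well_domains() does. */
	unsigned long long mask = 0;
	int i;

	for (i = 0; i < pwdoms_pw_4_sketch.count; i++)
		mask |= 1ULL << pwdoms_pw_4_sketch.list[i];

	printf("count=%d mask=%#llx\n", pwdoms_pw_4_sketch.count, mask);
	return 0;
}

Computing the count with a sizeof-based ARRAY_SIZE is what lets nested domain-list macros expand into a single flat initializer with no hand-maintained sizes, so the per-platform lists above can share common blocks.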
@@ -1380,24 +1407,24 @@ static const struct i915_power_well_desc tgl_power_wells[] = { * - top-level GTC (DDI-level GTC is in the well associated with the DDI) */ -#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - RKL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off, + RKL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc rkl_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -1407,12 +1434,12 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { .name = "DC_off", - .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &rkl_pwdoms_dc_off, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_3", - .domains = RKL_PW_3_POWER_DOMAINS, + .domain_list = &rkl_pwdoms_pw_3, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_vga = true, @@ -1423,7 +1450,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { .name = "PW_4", - .domains = RKL_PW_4_POWER_DOMAINS, + .domain_list = &rkl_pwdoms_pw_4, .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), @@ -1433,7 +1460,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { } }, { .name = "DDI_IO_A", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_a, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1441,7 +1468,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { } }, { .name = "DDI_IO_B", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_b, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1449,7 +1476,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { } }, { .name = "DDI_IO_TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc1, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1457,7 +1484,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { .name = "DDI_IO_TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc2, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1465,7 +1492,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { .name = "AUX_A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_a, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1473,7 +1500,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { .name = "AUX_B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_b, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1481,7 +1508,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { .name = "AUX_USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc1, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1489,7 +1516,7 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { .name = "AUX_USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, + .domain_list 
= &tgl_pwdoms_aux_usbc2, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1501,43 +1528,46 @@ static const struct i915_power_well_desc rkl_power_wells[] = { /* * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. */ -#define DG1_PW_3_POWER_DOMAINS ( \ - TGL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define DG1_PW_3_POWER_DOMAINS \ + TGL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2 -#define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - DG1_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_3, + DG1_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define DG1_PW_2_POWER_DOMAINS ( \ - DG1_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(dg1_pwdoms_dc_off, + DG1_PW_3_POWER_DOMAINS, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2, + DG1_PW_3_POWER_DOMAINS, + POWER_DOMAIN_TRANSCODER_VDSC_PW2, + POWER_DOMAIN_INIT); static const struct i915_power_well_desc dg1_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -1547,12 +1577,12 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "DC_off", - .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &dg1_pwdoms_dc_off, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_2", - .domains = DG1_PW_2_POWER_DOMAINS, + .domain_list = &dg1_pwdoms_pw_2, .ops = &hsw_power_well_ops, .has_fuses = true, .id = SKL_DISP_PW_2, @@ -1561,7 +1591,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "PW_3", - .domains = DG1_PW_3_POWER_DOMAINS, + .domain_list = &dg1_pwdoms_pw_3, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_vga = true, @@ -1572,7 +1602,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "DDI_IO_A", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_a, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1580,7 +1610,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { } }, { .name = "DDI_IO_B", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_b, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1588,7 +1618,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { } }, { .name = "DDI_IO_TC1", - .domains = 
TGL_DDI_IO_TC1_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc1, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1596,7 +1626,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "DDI_IO_TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_ddi_io_tc2, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1604,7 +1634,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "AUX_A", - .domains = TGL_AUX_A_IO_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_a, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1612,7 +1642,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "AUX_B", - .domains = TGL_AUX_B_IO_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_b, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1620,7 +1650,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "AUX_USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc1, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1629,7 +1659,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "AUX_USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_usbc2, .ops = &icl_aux_power_well_ops, .is_tc_tbt = false, .id = DISP_PW_ID_NONE, @@ -1638,7 +1668,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }, { .name = "PW_4", - .domains = TGL_PW_4_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_pw_4, .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), @@ -1648,7 +1678,7 @@ static const struct i915_power_well_desc dg1_power_wells[] = { } }, { .name = "PW_5", - .domains = TGL_PW_5_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_pw_5, .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_D), @@ -1677,54 +1707,66 @@ static const struct i915_power_well_desc dg1_power_wells[] = { * to top. This allows pipes to be power gated independently. 
*/ -#define XELPD_PW_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_D) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define XELPD_PW_D_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_D, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_D, \ + POWER_DOMAIN_TRANSCODER_D -#define XELPD_PW_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_d, + XELPD_PW_D_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define XELPD_PW_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define XELPD_PW_C_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_C -#define XELPD_PW_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_c, + XELPD_PW_C_POWER_DOMAINS, + POWER_DOMAIN_INIT); -#define XELPD_PW_2_POWER_DOMAINS ( \ - XELPD_PW_B_POWER_DOMAINS | \ - XELPD_PW_C_POWER_DOMAINS | \ - XELPD_PW_D_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +#define XELPD_PW_B_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_b, + XELPD_PW_B_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_INIT); + +#define XELPD_PW_2_POWER_DOMAINS \ + XELPD_PW_B_POWER_DOMAINS, \ + XELPD_PW_C_POWER_DOMAINS, \ + XELPD_PW_D_POWER_DOMAINS, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D_XELPD, \ + POWER_DOMAIN_PORT_DDI_LANES_E_XELPD, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_C, \ + POWER_DOMAIN_AUX_D_XELPD, \ + POWER_DOMAIN_AUX_E_XELPD, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2, \ + POWER_DOMAIN_AUX_USBC3, \ + POWER_DOMAIN_AUX_USBC4, \ + POWER_DOMAIN_AUX_TBT1, \ + POWER_DOMAIN_AUX_TBT2, \ + POWER_DOMAIN_AUX_TBT3, \ + POWER_DOMAIN_AUX_TBT4 + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2, + XELPD_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); /* * XELPD PW_1/PG_1 domains (under HW/DMC control): @@ -1743,45 +1785,46 @@ static const struct i915_power_well_desc dg1_power_wells[] = { * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) */ 
-#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - XELPD_PW_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_INIT)) +I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off, + XELPD_PW_2_POWER_DOMAINS, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); -#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) -#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) -#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) -#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) -#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) -#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_d_xelpd, POWER_DOMAIN_AUX_D_XELPD); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_e_xelpd, POWER_DOMAIN_AUX_E_XELPD); -#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) -#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) -#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) -#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4); -#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) -#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) -#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) -#define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) -#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) -#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4); + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_d_xelpd, POWER_DOMAIN_PORT_DDI_IO_D_XELPD); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_e_xelpd, POWER_DOMAIN_PORT_DDI_IO_E_XELPD); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3); +I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); static const struct i915_power_well_desc xelpd_power_wells[] = { { .name = "always-on", - .domains = POWER_DOMAIN_MASK, + .domain_list = &i9xx_pwdoms_always_on, .ops = &i9xx_always_on_power_well_ops, .always_on = true, .id = DISP_PW_ID_NONE, }, { .name = "PW_1", /* Handled by the DMC firmware */ - .domains = 0, + .domain_list = I915_PW_DOMAINS_NONE, .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, @@ -1791,12 +1834,12 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "DC_off", - .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_dc_off, .ops = 
&gen9_dc_off_power_well_ops, .id = SKL_DISP_DC_OFF, }, { .name = "PW_2", - .domains = XELPD_PW_2_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_pw_2, .ops = &hsw_power_well_ops, .has_vga = true, .has_fuses = true, @@ -1806,7 +1849,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "PW_A", - .domains = XELPD_PW_A_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_pw_a, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_A), .has_fuses = true, @@ -1816,7 +1859,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "PW_B", - .domains = XELPD_PW_B_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_pw_b, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, @@ -1826,7 +1869,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "PW_C", - .domains = XELPD_PW_C_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_pw_c, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, @@ -1836,7 +1879,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "PW_D", - .domains = XELPD_PW_D_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_pw_d, .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_D), .has_fuses = true, @@ -1846,7 +1889,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "DDI_IO_A", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_a, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1854,7 +1897,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_B", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_b, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1862,7 +1905,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_C", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .domain_list = &icl_pwdoms_ddi_io_c, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1870,7 +1913,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_D_XELPD", - .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_ddi_io_d_xelpd, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1878,7 +1921,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_E_XELPD", - .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_ddi_io_e_xelpd, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1886,7 +1929,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_TC1", - .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_ddi_io_tc1, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1894,7 +1937,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_TC2", - .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_ddi_io_tc2, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1902,7 +1945,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_TC3", - .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_ddi_io_tc3, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1910,7 +1953,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "DDI_IO_TC4", - .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, + .domain_list = 
&xelpd_pwdoms_ddi_io_tc4, .ops = &icl_ddi_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1918,7 +1961,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { } }, { .name = "AUX_A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_a, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, @@ -1927,7 +1970,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .domain_list = &icl_pwdoms_aux_b, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, @@ -1936,7 +1979,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_C", - .domains = TGL_AUX_C_IO_POWER_DOMAINS, + .domain_list = &tgl_pwdoms_aux_c, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, @@ -1945,7 +1988,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_D_XELPD", - .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_d_xelpd, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, @@ -1954,7 +1997,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_E_XELPD", - .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_e_xelpd, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1962,7 +2005,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_USBC1", - .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_usbc1, .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, .id = DISP_PW_ID_NONE, @@ -1971,7 +2014,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_USBC2", - .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_usbc2, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1979,7 +2022,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_USBC3", - .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_usbc3, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1987,7 +2030,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_USBC4", - .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_usbc4, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -1995,7 +2038,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_TBT1", - .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_tbt1, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -2004,7 +2047,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_TBT2", - .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_tbt2, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -2013,7 +2056,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_TBT3", - .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, + .domain_list = &xelpd_pwdoms_aux_tbt3, .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, .id = DISP_PW_ID_NONE, @@ -2022,7 +2065,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }, { .name = "AUX_TBT4", - .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, + .domain_list = 
&xelpd_pwdoms_aux_tbt4,
 		.ops = &icl_aux_power_well_ops,
 		.is_tc_tbt = true,
 		.id = DISP_PW_ID_NONE,
@@ -2032,6 +2075,24 @@ static const struct i915_power_well_desc xelpd_power_wells[] = {
 	},
 };
 
+static void init_power_well_domains(const struct i915_power_well_desc *desc,
+				    struct i915_power_well *power_well)
+{
+	int j;
+
+	if (!desc->domain_list)
+		return;
+
+	if (desc->domain_list->count == 0) {
+		power_well->domains = GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0);
+
+		return;
+	}
+
+	for (j = 0; j < desc->domain_list->count; j++)
+		power_well->domains |= BIT_ULL(desc->domain_list->list[j]);
+}
+
 static int
 __set_power_wells(struct i915_power_domains *power_domains,
 		  const struct i915_power_well_desc *power_well_descs,
@@ -2062,9 +2123,13 @@ __set_power_wells(struct i915_power_domains *power_domains,
 		if (BIT_ULL(id) & skip_mask)
 			continue;
 
-		power_domains->power_wells[plt_idx++].desc =
+		power_domains->power_wells[plt_idx].desc =
 			&power_well_descs[i];
 
+		init_power_well_domains(&power_well_descs[i], &power_domains->power_wells[plt_idx]);
+
+		plt_idx++;
+
 		if (id == DISP_PW_ID_NONE)
 			continue;
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index b95bc32d6da0..778a1bd4ab14 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -156,7 +156,7 @@ const char *intel_power_well_name(struct i915_power_well *power_well)
 
 u64 intel_power_well_domains(struct i915_power_well *power_well)
 {
-	return power_well->desc->domains;
+	return power_well->domains;
 }
 
 int intel_power_well_refcount(struct i915_power_well *power_well)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 26fe9e1048bc..0926b858d715 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -50,7 +50,10 @@ enum i915_power_well_id {
 
 struct i915_power_well_desc {
 	const char *name;
-	u64 domains;
+	const struct i915_power_domain_list {
+		const enum intel_display_power_domain *list;
+		u8 count;
+	} *domain_list;
 	/* Mask of pipes whose IRQ logic is backed by the pw */
 	u16 irq_pipe_mask:4;
 	u16 always_on:1;
@@ -99,6 +102,7 @@ struct i915_power_well_desc {
 
 struct i915_power_well {
 	const struct i915_power_well_desc *desc;
+	u64 domains;
 	/* power well enable/disable usage count */
 	int count;
 	/* cached hw enabled state */

From 888a2a6312a14ea6f8f50ec886601db2c43179df Mon Sep 17 00:00:00 2001
From: Imre Deak
Date: Fri, 15 Apr 2022 00:06:48 +0300
Subject: [PATCH 123/219] drm/i915: Convert the u64 power well domains mask to a bitmap
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To remove the aliasing of the power domain enum values in a follow-up
patch in this patchset (requiring a bigger mask) and allow for defining
additional power domains in the future (at least some upcoming TypeC
changes require this), convert the u64 i915_power_well_desc::domains
mask to a bitmap.

For simplicity I changed the for_each_power_domain_well() macros to
accept one domain only instead of a mask, as there isn't any current
user passing multiple domains.

v2: Don't add a typedef for the bitmap struct.
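As a rough, userspace-only sketch of the bitmap pattern the patch adopts (the mask_*_sketch helpers below are stand-ins for the kernel's DECLARE_BITMAP()/set_bit()/test_bit()/bitmap_or() API used in the diff that follows, and the domain count of 80 is an arbitrary assumption), a domain mask that is no longer capped at 64 bits could look like this:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Arbitrary assumption: any value above 64 shows why a u64 is not enough. */
#define DOMAIN_NUM_SKETCH 80

#define BITS_PER_LONG_SKETCH (sizeof(unsigned long) * CHAR_BIT)
#define MASK_LONGS_SKETCH \
	((DOMAIN_NUM_SKETCH + BITS_PER_LONG_SKETCH - 1) / BITS_PER_LONG_SKETCH)

/* Mirrors intel_power_domain_mask: a struct wrapping a bitmap array. */
struct power_domain_mask_sketch {
	unsigned long bits[MASK_LONGS_SKETCH];
};

/* Stand-in for set_bit(). */
static void mask_set_sketch(struct power_domain_mask_sketch *m, unsigned int domain)
{
	m->bits[domain / BITS_PER_LONG_SKETCH] |= 1UL << (domain % BITS_PER_LONG_SKETCH);
}

/* Stand-in for test_bit(). */
static bool mask_test_sketch(const struct power_domain_mask_sketch *m, unsigned int domain)
{
	return m->bits[domain / BITS_PER_LONG_SKETCH] & (1UL << (domain % BITS_PER_LONG_SKETCH));
}

/* Stand-in for bitmap_or(): e.g. combining the two pending async-put masks. */
static void mask_or_sketch(struct power_domain_mask_sketch *dst,
			   const struct power_domain_mask_sketch *a,
			   const struct power_domain_mask_sketch *b)
{
	size_t i;

	for (i = 0; i < MASK_LONGS_SKETCH; i++)
		dst->bits[i] = a->bits[i] | b->bits[i];
}

int main(void)
{
	struct power_domain_mask_sketch crtc = { { 0 } }, async_put = { { 0 } }, all = { { 0 } };

	mask_set_sketch(&crtc, 3);        /* e.g. a pipe domain */
	mask_set_sketch(&async_put, 70);  /* a domain a u64 could not index */
	mask_or_sketch(&all, &crtc, &async_put);

	printf("domain 70 pending: %d\n", mask_test_sketch(&all, 70));
	return 0;
}

The same substitutions run through the conversion below: bitmap_zero() replaces "= 0", set_bit() replaces "|= BIT_ULL()", test_bit() replaces "& BIT_ULL()", and bitmap_or()/bitmap_andnot() replace the "|" and "& ~" operators that used to act on the u64 masks.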
(Jani) Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-9-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 65 ++++++----- .../drm/i915/display/intel_display_power.c | 108 +++++++++++------- .../drm/i915/display/intel_display_power.h | 18 +-- .../i915/display/intel_display_power_map.c | 4 +- .../i915/display/intel_display_power_well.c | 4 +- .../i915/display/intel_display_power_well.h | 5 +- 6 files changed, 116 insertions(+), 88 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 2b4c9a62613a..9cd35f4a25df 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2244,66 +2244,71 @@ intel_legacy_aux_to_power_domain(enum aux_ch aux_ch) } } -static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) +static void get_crtc_power_domains(struct intel_crtc_state *crtc_state, + struct intel_power_domain_mask *mask) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct drm_encoder *encoder; enum pipe pipe = crtc->pipe; - u64 mask; + + bitmap_zero(mask->bits, POWER_DOMAIN_NUM); if (!crtc_state->hw.active) - return 0; + return; - mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); - mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder)); + set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); + set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); if (crtc_state->pch_pfit.enabled || crtc_state->pch_pfit.force_thru) - mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); + set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); drm_for_each_encoder_mask(encoder, &dev_priv->drm, crtc_state->uapi.encoder_mask) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - mask |= BIT_ULL(intel_encoder->power_domain); + set_bit(intel_encoder->power_domain, mask->bits); } if (HAS_DDI(dev_priv) && crtc_state->has_audio) - mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO); + set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); if (crtc_state->shared_dpll) - mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); + set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); if (crtc_state->dsc.compression_enable) - mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder)); - - return mask; + set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); } -static u64 -modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) +static void +modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, + struct intel_power_domain_mask *old_domains) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain domain; - u64 domains, new_domains, old_domains; + struct intel_power_domain_mask domains, new_domains; - domains = get_crtc_power_domains(crtc_state); + get_crtc_power_domains(crtc_state, &domains); - new_domains = domains & ~crtc->enabled_power_domains.mask; - old_domains = crtc->enabled_power_domains.mask & ~domains; + bitmap_andnot(new_domains.bits, + domains.bits, + crtc->enabled_power_domains.mask.bits, + POWER_DOMAIN_NUM); + bitmap_andnot(old_domains->bits, + crtc->enabled_power_domains.mask.bits, + domains.bits, + POWER_DOMAIN_NUM); - for_each_power_domain(domain, new_domains) + for_each_power_domain(domain, &new_domains) 
intel_display_power_get_in_set(dev_priv, &crtc->enabled_power_domains, domain); - - return old_domains; } static void modeset_put_crtc_power_domains(struct intel_crtc *crtc, - u64 domains) + struct intel_power_domain_mask *domains) { intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), &crtc->enabled_power_domains, @@ -8505,7 +8510,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; - u64 put_domains[I915_MAX_PIPES] = {}; + struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; intel_wakeref_t wakeref = 0; int i; @@ -8522,9 +8527,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state) || new_crtc_state->update_pipe) { - - put_domains[crtc->pipe] = - modeset_get_crtc_power_domains(new_crtc_state); + modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]); } } @@ -8624,7 +8627,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); - modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]); + modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); @@ -10465,11 +10468,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - u64 put_domains; + struct intel_power_domain_mask put_domains; - put_domains = modeset_get_crtc_power_domains(crtc_state); - if (drm_WARN_ON(dev, put_domains)) - modeset_put_crtc_power_domains(crtc, put_domains); + modeset_get_crtc_power_domains(crtc_state, &put_domains); + if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM))) + modeset_put_crtc_power_domains(crtc, &put_domains); } intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 791b4e9f9834..69f66da00705 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -22,13 +22,13 @@ #include "intel_snps_phy.h" #include "vlv_sideband.h" -#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ +#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \ for_each_power_well(__dev_priv, __power_well) \ - for_each_if((__power_well)->domains & (__domain_mask)) + for_each_if(test_bit((__domain), (__power_well)->domains.bits)) -#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ +#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \ for_each_power_well_reverse(__dev_priv, __power_well) \ - for_each_if((__power_well)->domains & (__domain_mask)) + for_each_if(test_bit((__domain), (__power_well)->domains.bits)) const char * intel_display_power_domain_str(enum intel_display_power_domain domain) @@ -191,7 +191,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, is_enabled = true; - for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { + for_each_power_domain_well_reverse(dev_priv, power_well, domain) { if (intel_power_well_is_always_on(power_well)) continue; @@ -307,10 
+307,13 @@ unlock: #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) -static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) +static void __async_put_domains_mask(struct i915_power_domains *power_domains, + struct intel_power_domain_mask *mask) { - return power_domains->async_put_domains[0] | - power_domains->async_put_domains[1]; + bitmap_or(mask->bits, + power_domains->async_put_domains[0].bits, + power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) @@ -321,8 +324,11 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); - return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & - power_domains->async_put_domains[1]); + + return !drm_WARN_ON(&i915->drm, + bitmap_intersects(power_domains->async_put_domains[0].bits, + power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM)); } static bool @@ -331,14 +337,17 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); + struct intel_power_domain_mask async_put_mask; enum intel_display_power_domain domain; bool err = false; err |= !assert_async_put_domain_masks_disjoint(power_domains); - err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != - !!__async_put_domains_mask(power_domains)); + __async_put_domains_mask(power_domains, &async_put_mask); + err |= drm_WARN_ON(&i915->drm, + !!power_domains->async_put_wakeref != + !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)); - for_each_power_domain(domain, __async_put_domains_mask(power_domains)) + for_each_power_domain(domain, &async_put_mask) err |= drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); @@ -346,14 +355,14 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) } static void print_power_domains(struct i915_power_domains *power_domains, - const char *prefix, u64 mask) + const char *prefix, struct intel_power_domain_mask *mask) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); enum intel_display_power_domain domain; - drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); + drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); for_each_power_domain(domain, mask) drm_dbg(&i915->drm, "%s use_count %d\n", intel_display_power_domain_str(domain), @@ -371,9 +380,9 @@ print_async_put_domains_state(struct i915_power_domains *power_domains) power_domains->async_put_wakeref); print_power_domains(power_domains, "async_put_domains[0]", - power_domains->async_put_domains[0]); + &power_domains->async_put_domains[0]); print_power_domains(power_domains, "async_put_domains[1]", - power_domains->async_put_domains[1]); + &power_domains->async_put_domains[1]); } static void @@ -397,11 +406,13 @@ verify_async_put_domains_state(struct i915_power_domains *power_domains) #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ -static u64 async_put_domains_mask(struct i915_power_domains *power_domains) +static void async_put_domains_mask(struct i915_power_domains *power_domains, + struct intel_power_domain_mask *mask) + { assert_async_put_domain_masks_disjoint(power_domains); - return __async_put_domains_mask(power_domains); + __async_put_domains_mask(power_domains, mask); } static void @@ -410,8 +421,8 @@ 
async_put_domains_clear_domain(struct i915_power_domains *power_domains, { assert_async_put_domain_masks_disjoint(power_domains); - power_domains->async_put_domains[0] &= ~BIT_ULL(domain); - power_domains->async_put_domains[1] &= ~BIT_ULL(domain); + clear_bit(domain, power_domains->async_put_domains[0].bits); + clear_bit(domain, power_domains->async_put_domains[1].bits); } static bool @@ -419,16 +430,19 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct intel_power_domain_mask async_put_mask; bool ret = false; - if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) + async_put_domains_mask(power_domains, &async_put_mask); + if (!test_bit(domain, async_put_mask.bits)) goto out_verify; async_put_domains_clear_domain(power_domains, domain); ret = true; - if (async_put_domains_mask(power_domains)) + async_put_domains_mask(power_domains, &async_put_mask); + if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)) goto out_verify; cancel_delayed_work(&power_domains->async_put_work); @@ -450,7 +464,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, if (intel_display_power_grab_async_put_ref(dev_priv, domain)) return; - for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) + for_each_power_domain_well(dev_priv, power_well, domain) intel_power_well_get(dev_priv, power_well); power_domains->domain_use_count[domain]++; @@ -531,20 +545,22 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, struct i915_power_domains *power_domains; struct i915_power_well *power_well; const char *name = intel_display_power_domain_str(domain); + struct intel_power_domain_mask async_put_mask; power_domains = &dev_priv->power_domains; drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], "Use count on domain %s is already zero\n", name); + async_put_domains_mask(power_domains, &async_put_mask); drm_WARN(&dev_priv->drm, - async_put_domains_mask(power_domains) & BIT_ULL(domain), + test_bit(domain, async_put_mask.bits), "Async disabling of domain %s is pending\n", name); power_domains->domain_use_count[domain]--; - for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) + for_each_power_domain_well_reverse(dev_priv, power_well, domain) intel_power_well_put(dev_priv, power_well); } @@ -573,7 +589,8 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains, } static void -release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) +release_async_put_domains(struct i915_power_domains *power_domains, + struct intel_power_domain_mask *mask) { struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, @@ -621,12 +638,15 @@ intel_display_power_put_async_work(struct work_struct *work) goto out_verify; release_async_put_domains(power_domains, - power_domains->async_put_domains[0]); + &power_domains->async_put_domains[0]); /* Requeue the work if more domains were async put meanwhile. 
*/ - if (power_domains->async_put_domains[1]) { - power_domains->async_put_domains[0] = - fetch_and_zero(&power_domains->async_put_domains[1]); + if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) { + bitmap_copy(power_domains->async_put_domains[0].bits, + power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM); + bitmap_zero(power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM); queue_async_put_domains_work(power_domains, fetch_and_zero(&new_work_wakeref)); } else { @@ -678,9 +698,9 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, /* Let a pending work requeue itself or queue a new one. */ if (power_domains->async_put_wakeref) { - power_domains->async_put_domains[1] |= BIT_ULL(domain); + set_bit(domain, power_domains->async_put_domains[1].bits); } else { - power_domains->async_put_domains[0] |= BIT_ULL(domain); + set_bit(domain, power_domains->async_put_domains[0].bits); queue_async_put_domains_work(power_domains, fetch_and_zero(&work_wakeref)); } @@ -711,6 +731,7 @@ out_verify: void intel_display_power_flush_work(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->power_domains; + struct intel_power_domain_mask async_put_mask; intel_wakeref_t work_wakeref; mutex_lock(&power_domains->lock); @@ -719,8 +740,8 @@ void intel_display_power_flush_work(struct drm_i915_private *i915) if (!work_wakeref) goto out_verify; - release_async_put_domains(power_domains, - async_put_domains_mask(power_domains)); + async_put_domains_mask(power_domains, &async_put_mask); + release_async_put_domains(power_domains, &async_put_mask); cancel_delayed_work(&power_domains->async_put_work); out_verify: @@ -799,13 +820,13 @@ intel_display_power_get_in_set(struct drm_i915_private *i915, { intel_wakeref_t __maybe_unused wf; - drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); + drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); wf = intel_display_power_get(i915, domain); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) power_domain_set->wakerefs[domain] = wf; #endif - power_domain_set->mask |= BIT_ULL(domain); + set_bit(domain, power_domain_set->mask.bits); } bool @@ -815,7 +836,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, { intel_wakeref_t wf; - drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); + drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); wf = intel_display_power_get_if_enabled(i915, domain); if (!wf) @@ -824,7 +845,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) power_domain_set->wakerefs[domain] = wf; #endif - power_domain_set->mask |= BIT_ULL(domain); + set_bit(domain, power_domain_set->mask.bits); return true; } @@ -832,11 +853,12 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, void intel_display_power_put_mask_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set, - u64 mask) + struct intel_power_domain_mask *mask) { enum intel_display_power_domain domain; - drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); + drm_WARN_ON(&i915->drm, + !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); for_each_power_domain(domain, mask) { intel_wakeref_t __maybe_unused wf = -1; @@ -845,7 +867,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); #endif 
intel_display_power_put(i915, domain, wf); - power_domain_set->mask &= ~BIT_ULL(domain); + clear_bit(domain, power_domain_set->mask.bits); } } @@ -947,8 +969,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) dev_priv->dmc.target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); - BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); - mutex_init(&power_domains->lock); INIT_DELAYED_WORK(&power_domains->async_put_work, diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 5ae81e330022..66fef12ef3db 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -139,6 +139,10 @@ enum intel_display_power_domain { ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ (tran) + POWER_DOMAIN_TRANSCODER_A) +struct intel_power_domain_mask { + DECLARE_BITMAP(bits, POWER_DOMAIN_NUM); +}; + struct i915_power_domains { /* * Power wells needed for initialization at driver init and suspend @@ -156,21 +160,21 @@ struct i915_power_domains { struct delayed_work async_put_work; intel_wakeref_t async_put_wakeref; - u64 async_put_domains[2]; + struct intel_power_domain_mask async_put_domains[2]; struct i915_power_well *power_wells; }; struct intel_display_power_domain_set { - u64 mask; + struct intel_power_domain_mask mask; #ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM intel_wakeref_t wakerefs[POWER_DOMAIN_NUM]; #endif }; -#define for_each_power_domain(domain, mask) \ - for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ - for_each_if(BIT_ULL(domain) & (mask)) +#define for_each_power_domain(__domain, __mask) \ + for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \ + for_each_if(test_bit((__domain), (__mask)->bits)) int intel_power_domains_init(struct drm_i915_private *dev_priv); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); @@ -251,13 +255,13 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, void intel_display_power_put_mask_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set, - u64 mask); + struct intel_power_domain_mask *mask); static inline void intel_display_power_put_all_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set) { - intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); + intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask); } void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index b7aa13d6a33f..a9e0ebf18fca 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -2084,13 +2084,13 @@ static void init_power_well_domains(const struct i915_power_well_desc *desc, return; if (desc->domain_list->count == 0) { - power_well->domains = GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0); + bitmap_fill(power_well->domains.bits, POWER_DOMAIN_NUM); return; } for (j = 0; j < desc->domain_list->count; j++) - power_well->domains |= BIT_ULL(desc->domain_list->list[j]); + set_bit(desc->domain_list->list[j], power_well->domains.bits); } static int diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 778a1bd4ab14..b8f31d232cd1 100644 --- 
a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -154,9 +154,9 @@ const char *intel_power_well_name(struct i915_power_well *power_well) return power_well->desc->name; } -u64 intel_power_well_domains(struct i915_power_well *power_well) +struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well) { - return power_well->domains; + return &power_well->domains; } int intel_power_well_refcount(struct i915_power_well *power_well) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index 0926b858d715..bd60fb4166e7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -8,6 +8,7 @@ #include #include "intel_display.h" +#include "intel_display_power.h" struct drm_i915_private; struct i915_power_well; @@ -102,7 +103,7 @@ struct i915_power_well_desc { struct i915_power_well { const struct i915_power_well_desc *desc; - u64 domains; + struct intel_power_domain_mask domains; /* power well enable/disable usage count */ int count; /* cached hw enabled state */ @@ -129,7 +130,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); bool intel_power_well_is_always_on(struct i915_power_well *power_well); const char *intel_power_well_name(struct i915_power_well *power_well); -u64 intel_power_well_domains(struct i915_power_well *power_well); +struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well); int intel_power_well_refcount(struct i915_power_well *power_well); void chv_phy_powergate_lanes(struct intel_encoder *encoder, From 4a845ff0c0d445556ec9e32073ac6cf0cd66e117 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:49 +0300 Subject: [PATCH 124/219] drm/i915: Simplify power well definitions by adding power well instances MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All the port specific AUX/DDI_IO power wells share the same power well ops struct and flags, so we can save some space and simplify the definition of these by listing for all such power wells only the params specific to them (name, domains, power well register index, id). Move these params to a new i915_power_well_instance struct and convert the per-platform power well definitions accordingly. For all power well instances the name and power domain list params must be specified, while the register index and id are optional; add the I915_PW() macro that both simplifies the definitions and ensures that the required params are set. Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-10-imre.deak@intel.com --- .../i915/display/intel_display_power_map.c | 1503 +++++------------ .../i915/display/intel_display_power_well.c | 72 +- .../i915/display/intel_display_power_well.h | 48 +- 3 files changed, 499 insertions(+), 1124 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index a9e0ebf18fca..c282e05bfc1a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -30,16 +30,23 @@ #define I915_PW_DOMAINS_NONE NULL #define I915_PW_DOMAINS_ALL /* zero-length list */ +#define I915_PW_INSTANCES(...)
\ + (const struct i915_power_well_instance_list) \ + __LIST(__LIST_INLINE_ELEMS(struct i915_power_well_instance, __VA_ARGS__)) + +#define I915_PW(_name, _domain_list, ...) \ + { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ } + I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL); static const struct i915_power_well_desc i9xx_always_on_power_well[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, }; @@ -54,16 +61,16 @@ I915_DECL_PW_DOMAINS(i830_pwdoms_pipes, static const struct i915_power_well_desc i830_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "pipes", - .domain_list = &i830_pwdoms_pipes, + .instances = &I915_PW_INSTANCES( + I915_PW("pipes", &i830_pwdoms_pipes), + ), .ops = &i830_pipes_power_well_ops, - .id = DISP_PW_ID_NONE, }, }; @@ -87,20 +94,19 @@ I915_DECL_PW_DOMAINS(hsw_pwdoms_display, static const struct i915_power_well_desc hsw_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "display", - .domain_list = &hsw_pwdoms_display, + .instances = &I915_PW_INSTANCES( + I915_PW("display", &hsw_pwdoms_display, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .id = HSW_DISP_PW_GLOBAL), + ), .ops = &hsw_power_well_ops, .has_vga = true, - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - }, }, }; @@ -123,21 +129,20 @@ I915_DECL_PW_DOMAINS(bdw_pwdoms_display, static const struct i915_power_well_desc bdw_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "display", - .domain_list = &bdw_pwdoms_display, + .instances = &I915_PW_INSTANCES( + I915_PW("display", &bdw_pwdoms_display, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .id = HSW_DISP_PW_GLOBAL), + ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - }, }, }; @@ -178,59 +183,37 @@ I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes, static const struct i915_power_well_desc vlv_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "display", - .domain_list = &vlv_pwdoms_display, + .instances = &I915_PW_INSTANCES( + I915_PW("display", &vlv_pwdoms_display, + .vlv.idx = PUNIT_PWGT_IDX_DISP2D, + .id = VLV_DISP_PW_DISP2D), + ), .ops = &vlv_display_power_well_ops, - .id = VLV_DISP_PW_DISP2D, - { - .vlv.idx = PUNIT_PWGT_IDX_DISP2D, - }, }, { - .name = "dpio-tx-b-01", - .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-tx-b-01", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01), + 
I915_PW("dpio-tx-b-23", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23), + I915_PW("dpio-tx-c-01", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01), + I915_PW("dpio-tx-c-23", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23), + ), .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, - }, }, { - .name = "dpio-tx-b-23", - .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, - }, - }, { - .name = "dpio-tx-c-01", - .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, - }, - }, { - .name = "dpio-tx-c-23", - .domain_list = &vlv_pwdoms_dpio_tx_bc_lanes, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, - }, - }, { - .name = "dpio-common", - .domain_list = &vlv_pwdoms_dpio_cmn_bc, + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common", &vlv_pwdoms_dpio_cmn_bc, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC), + ), .ops = &vlv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - }, }, }; @@ -272,37 +255,31 @@ I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d, static const struct i915_power_well_desc chv_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "display", /* * Pipe A power well is the new disp2d well. Pipe B and C * power wells don't actually exist. Pipe A power well is * required for any pipe to work. 
*/ - .domain_list = &chv_pwdoms_display, + .instances = &I915_PW_INSTANCES( + I915_PW("display", &chv_pwdoms_display), + ), .ops = &chv_pipe_power_well_ops, - .id = DISP_PW_ID_NONE, }, { - .name = "dpio-common-bc", - .domain_list = &chv_pwdoms_dpio_cmn_bc, + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common-bc", &chv_pwdoms_dpio_cmn_bc, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC), + I915_PW("dpio-common-d", &chv_pwdoms_dpio_cmn_d, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, + .id = CHV_DISP_PW_DPIO_CMN_D), + ), .ops = &chv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - }, - }, { - .name = "dpio-common-d", - .domain_list = &chv_pwdoms_dpio_cmn_d, - .ops = &chv_dpio_cmn_power_well_ops, - .id = CHV_DISP_PW_DPIO_CMN_D, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, - }, }, }; @@ -355,80 +332,54 @@ I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d, static const struct i915_power_well_desc skl_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - }, }, { - .name = "MISC_IO", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("MISC_IO", I915_PW_DOMAINS_NONE, + .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, + .id = SKL_DISP_PW_MISC_IO), + ), .ops = &hsw_power_well_ops, .always_on = true, - .id = SKL_DISP_PW_MISC_IO, - { - .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, - }, }, { - .name = "DC_off", - .domain_list = &skl_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &skl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_2", - .domain_list = &skl_pwdoms_pw_2, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &skl_pwdoms_pw_2, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .has_fuses = true, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - }, }, { - .name = "DDI_IO_A_E", - .domain_list = &skl_pwdoms_ddi_io_a_e, + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A_E", &skl_pwdoms_ddi_io_a_e, .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E), + I915_PW("DDI_IO_B", &skl_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &skl_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_D", &skl_pwdoms_ddi_io_d, .hsw.idx = SKL_PW_CTL_IDX_DDI_D), + ), .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, - }, - }, { - .name = "DDI_IO_B", - .domain_list = &skl_pwdoms_ddi_io_b, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, { - .name = "DDI_IO_C", - .domain_list = &skl_pwdoms_ddi_io_c, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, { - .name = "DDI_IO_D", - .domain_list = &skl_pwdoms_ddi_io_d, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { 
- .hsw.idx = SKL_PW_CTL_IDX_DDI_D, - }, }, }; @@ -474,54 +425,47 @@ I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc, static const struct i915_power_well_desc bxt_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - }, }, { - .name = "DC_off", - .domain_list = &bxt_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &bxt_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_2", - .domain_list = &bxt_pwdoms_pw_2, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &bxt_pwdoms_pw_2, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .has_fuses = true, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - }, }, { - .name = "dpio-common-a", - .domain_list = &bxt_pwdoms_dpio_cmn_a, + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common-a", &bxt_pwdoms_dpio_cmn_a, + .bxt.phy = DPIO_PHY1, + .id = BXT_DISP_PW_DPIO_CMN_A), + I915_PW("dpio-common-bc", &bxt_pwdoms_dpio_cmn_bc, + .bxt.phy = DPIO_PHY0, + .id = VLV_DISP_PW_DPIO_CMN_BC), + ), .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, - }, { - .name = "dpio-common-bc", - .domain_list = &bxt_pwdoms_dpio_cmn_bc, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, }, }; @@ -587,110 +531,60 @@ I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c, static const struct i915_power_well_desc glk_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - }, }, { - .name = "DC_off", - .domain_list = &glk_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &glk_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_2", - .domain_list = &glk_pwdoms_pw_2, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &glk_pwdoms_pw_2, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .has_fuses = true, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - }, }, { - .name = "dpio-common-a", - .domain_list = &glk_pwdoms_dpio_cmn_a, + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common-a", &glk_pwdoms_dpio_cmn_a, + .bxt.phy = DPIO_PHY1, + .id = BXT_DISP_PW_DPIO_CMN_A), + 
I915_PW("dpio-common-b", &glk_pwdoms_dpio_cmn_b, + .bxt.phy = DPIO_PHY0, + .id = VLV_DISP_PW_DPIO_CMN_BC), + I915_PW("dpio-common-c", &glk_pwdoms_dpio_cmn_c, + .bxt.phy = DPIO_PHY2, + .id = GLK_DISP_PW_DPIO_CMN_C), + ), .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, }, { - .name = "dpio-common-b", - .domain_list = &glk_pwdoms_dpio_cmn_b, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, - }, { - .name = "dpio-common-c", - .domain_list = &glk_pwdoms_dpio_cmn_c, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = GLK_DISP_PW_DPIO_CMN_C, - { - .bxt.phy = DPIO_PHY2, - }, - }, { - .name = "AUX_A", - .domain_list = &glk_pwdoms_aux_a, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &glk_pwdoms_aux_a, .hsw.idx = GLK_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &glk_pwdoms_aux_b, .hsw.idx = GLK_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &glk_pwdoms_aux_c, .hsw.idx = GLK_PW_CTL_IDX_AUX_C), + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = GLK_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C), + ), .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_A, - }, - }, { - .name = "AUX_B", - .domain_list = &glk_pwdoms_aux_b, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_B, - }, - }, { - .name = "AUX_C", - .domain_list = &glk_pwdoms_aux_c, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_C, - }, - }, { - .name = "DDI_IO_A", - .domain_list = &glk_pwdoms_ddi_io_a, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_DDI_A, - }, - }, { - .name = "DDI_IO_B", - .domain_list = &glk_pwdoms_ddi_io_b, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, { - .name = "DDI_IO_C", - .domain_list = &glk_pwdoms_ddi_io_c, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, }, }; @@ -790,193 +684,82 @@ I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT_F); static const struct i915_power_well_desc icl_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - }, }, { - .name = "DC_off", - .domain_list = &icl_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &icl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_2", - .domain_list = &icl_pwdoms_pw_2, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &icl_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), .ops = &hsw_power_well_ops, .has_fuses = true, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - }, }, { - .name = "PW_3", - .domain_list = &icl_pwdoms_pw_3, + .instances = &I915_PW_INSTANCES( + 
I915_PW("PW_3", &icl_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - }, }, { - .name = "DDI_IO_A", - .domain_list = &icl_pwdoms_ddi_io_a, + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &icl_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = ICL_PW_CTL_IDX_DDI_D), + I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = ICL_PW_CTL_IDX_DDI_E), + I915_PW("DDI_IO_F", &icl_pwdoms_ddi_io_f, .hsw.idx = ICL_PW_CTL_IDX_DDI_F), + ), .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - }, }, { - .name = "DDI_IO_B", - .domain_list = &icl_pwdoms_ddi_io_b, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - }, - }, { - .name = "DDI_IO_C", - .domain_list = &icl_pwdoms_ddi_io_c, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - }, - }, { - .name = "DDI_IO_D", - .domain_list = &icl_pwdoms_ddi_io_d, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_D, - }, - }, { - .name = "DDI_IO_E", - .domain_list = &icl_pwdoms_ddi_io_e, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_E, - }, - }, { - .name = "DDI_IO_F", - .domain_list = &icl_pwdoms_ddi_io_f, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_F, - }, - }, { - .name = "AUX_A", - .domain_list = &icl_pwdoms_aux_a, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = ICL_PW_CTL_IDX_AUX_D), + I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = ICL_PW_CTL_IDX_AUX_E), + I915_PW("AUX_F", &icl_pwdoms_aux_f, .hsw.idx = ICL_PW_CTL_IDX_AUX_F), + ), .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, }, { - .name = "AUX_B", - .domain_list = &icl_pwdoms_aux_b, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, { - .name = "AUX_C", - .domain_list = &icl_pwdoms_aux_c, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - }, - }, { - .name = "AUX_D", - .domain_list = &icl_pwdoms_aux_d, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_D, - }, - }, { - .name = "AUX_E", - .domain_list = &icl_pwdoms_aux_e, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_E, - }, - }, { - .name = "AUX_F", - .domain_list = &icl_pwdoms_aux_f, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_F, - }, - }, { - .name = "AUX_TBT1", - .domain_list = &icl_pwdoms_aux_tbt1, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", 
&icl_pwdoms_aux_tbt2, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4), + ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, - }, }, { - .name = "AUX_TBT2", - .domain_list = &icl_pwdoms_aux_tbt2, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, - }, - }, { - .name = "AUX_TBT3", - .domain_list = &icl_pwdoms_aux_tbt3, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, - }, - }, { - .name = "AUX_TBT4", - .domain_list = &icl_pwdoms_aux_tbt4, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, - }, - }, { - .name = "PW_4", - .domain_list = &icl_pwdoms_pw_4, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &icl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - }, }, }; @@ -1087,276 +870,104 @@ I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off, static const struct i915_power_well_desc tgl_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - }, }, { - .name = "DC_off", - .domain_list = &tgl_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &tgl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_2", - .domain_list = &tgl_pwdoms_pw_2, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &tgl_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), .ops = &hsw_power_well_ops, .has_fuses = true, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - }, }, { - .name = "PW_3", - .domain_list = &tgl_pwdoms_pw_3, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_3", &tgl_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - }, }, { - .name = "DDI_IO_A", - .domain_list = &icl_pwdoms_ddi_io_a, + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &icl_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), + I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), + 
I915_PW("DDI_IO_TC5", &tgl_pwdoms_ddi_io_tc5, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5), + I915_PW("DDI_IO_TC6", &tgl_pwdoms_ddi_io_tc6, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6), + ), .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } }, { - .name = "DDI_IO_B", - .domain_list = &icl_pwdoms_ddi_io_b, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, { - .name = "DDI_IO_C", - .domain_list = &icl_pwdoms_ddi_io_c, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - } - }, { - .name = "DDI_IO_TC1", - .domain_list = &tgl_pwdoms_ddi_io_tc1, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, { - .name = "DDI_IO_TC2", - .domain_list = &tgl_pwdoms_ddi_io_tc2, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, { - .name = "DDI_IO_TC3", - .domain_list = &tgl_pwdoms_ddi_io_tc3, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, - }, - }, { - .name = "DDI_IO_TC4", - .domain_list = &tgl_pwdoms_ddi_io_tc4, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, - }, - }, { - .name = "DDI_IO_TC5", - .domain_list = &tgl_pwdoms_ddi_io_tc5, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, - }, - }, { - .name = "DDI_IO_TC6", - .domain_list = &tgl_pwdoms_ddi_io_tc6, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, - }, - }, { - .name = "TC_cold_off", - .domain_list = &tgl_pwdoms_tc_cold_off, + .instances = &I915_PW_INSTANCES( + I915_PW("TC_cold_off", &tgl_pwdoms_tc_cold_off, + .id = TGL_DISP_PW_TC_COLD_OFF), + ), .ops = &tgl_tc_cold_off_ops, - .id = TGL_DISP_PW_TC_COLD_OFF, }, { - .name = "AUX_A", - .domain_list = &tgl_pwdoms_aux_a, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &tgl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &tgl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &tgl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), + I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4), + I915_PW("AUX_USBC5", &tgl_pwdoms_aux_usbc5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5), + I915_PW("AUX_USBC6", &tgl_pwdoms_aux_usbc6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6), + ), .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, }, { - .name = "AUX_B", - .domain_list = &tgl_pwdoms_aux_b, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, { - .name = "AUX_C", - .domain_list = &tgl_pwdoms_aux_c, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - }, - }, { - .name = "AUX_USBC1", - .domain_list = &tgl_pwdoms_aux_usbc1, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - }, - }, { - .name = "AUX_USBC2", - .domain_list = &tgl_pwdoms_aux_usbc2, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = 
TGL_PW_CTL_IDX_AUX_TC2, - }, - }, { - .name = "AUX_USBC3", - .domain_list = &tgl_pwdoms_aux_usbc3, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, - }, - }, { - .name = "AUX_USBC4", - .domain_list = &tgl_pwdoms_aux_usbc4, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, - }, - }, { - .name = "AUX_USBC5", - .domain_list = &tgl_pwdoms_aux_usbc5, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, - }, - }, { - .name = "AUX_USBC6", - .domain_list = &tgl_pwdoms_aux_usbc6, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, - }, - }, { - .name = "AUX_TBT1", - .domain_list = &tgl_pwdoms_aux_tbt1, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_TBT1", &tgl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", &tgl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &tgl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &tgl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), + I915_PW("AUX_TBT5", &tgl_pwdoms_aux_tbt5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5), + I915_PW("AUX_TBT6", &tgl_pwdoms_aux_tbt6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6), + ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - }, }, { - .name = "AUX_TBT2", - .domain_list = &tgl_pwdoms_aux_tbt2, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - }, - }, { - .name = "AUX_TBT3", - .domain_list = &tgl_pwdoms_aux_tbt3, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - }, - }, { - .name = "AUX_TBT4", - .domain_list = &tgl_pwdoms_aux_tbt4, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, - }, - }, { - .name = "AUX_TBT5", - .domain_list = &tgl_pwdoms_aux_tbt5, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, - }, - }, { - .name = "AUX_TBT6", - .domain_list = &tgl_pwdoms_aux_tbt6, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, - }, - }, { - .name = "PW_4", - .domain_list = &tgl_pwdoms_pw_4, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &tgl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - } }, { - .name = "PW_5", - .domain_list = &tgl_pwdoms_pw_5, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_5", &tgl_pwdoms_pw_5, + .hsw.idx = TGL_PW_CTL_IDX_PW_5), + ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_D), - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_PW_5, - }, }, }; @@ -1416,112 +1027,61 @@ I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off, static const struct i915_power_well_desc rkl_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC 
firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - }, }, { - .name = "DC_off", - .domain_list = &rkl_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &rkl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_3", - .domain_list = &rkl_pwdoms_pw_3, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_3", &rkl_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_vga = true, .has_fuses = true, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - }, }, { - .name = "PW_4", - .domain_list = &rkl_pwdoms_pw_4, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &rkl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - } }, { - .name = "DDI_IO_A", - .domain_list = &icl_pwdoms_ddi_io_a, + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + ), .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } }, { - .name = "DDI_IO_B", - .domain_list = &icl_pwdoms_ddi_io_b, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, { - .name = "DDI_IO_TC1", - .domain_list = &tgl_pwdoms_ddi_io_tc1, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, { - .name = "DDI_IO_TC2", - .domain_list = &tgl_pwdoms_ddi_io_tc2, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, { - .name = "AUX_A", - .domain_list = &icl_pwdoms_aux_a, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + ), .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, { - .name = "AUX_B", - .domain_list = &icl_pwdoms_aux_b, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, { - .name = "AUX_USBC1", - .domain_list = &tgl_pwdoms_aux_usbc1, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - }, - }, { - .name = "AUX_USBC2", - .domain_list = &tgl_pwdoms_aux_usbc2, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - }, }, }; @@ -1559,133 +1119,77 @@ I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2, static const struct i915_power_well_desc dg1_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", 
&i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - }, }, { - .name = "DC_off", - .domain_list = &dg1_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &dg1_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_2", - .domain_list = &dg1_pwdoms_pw_2, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &dg1_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), .ops = &hsw_power_well_ops, .has_fuses = true, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - }, }, { - .name = "PW_3", - .domain_list = &dg1_pwdoms_pw_3, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_3", &dg1_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_vga = true, .has_fuses = true, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - }, }, { - .name = "DDI_IO_A", - .domain_list = &icl_pwdoms_ddi_io_a, + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + ), .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } }, { - .name = "DDI_IO_B", - .domain_list = &icl_pwdoms_ddi_io_b, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, { - .name = "DDI_IO_TC1", - .domain_list = &tgl_pwdoms_ddi_io_tc1, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, { - .name = "DDI_IO_TC2", - .domain_list = &tgl_pwdoms_ddi_io_tc2, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, { - .name = "AUX_A", - .domain_list = &tgl_pwdoms_aux_a, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &tgl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &tgl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + ), .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, }, { - .name = "AUX_B", - .domain_list = &tgl_pwdoms_aux_b, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, { - .name = "AUX_USBC1", - .domain_list = &tgl_pwdoms_aux_usbc1, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - }, - }, { - .name = "AUX_USBC2", - .domain_list = &tgl_pwdoms_aux_usbc2, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = false, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - }, - }, { - .name = "PW_4", - .domain_list = &tgl_pwdoms_pw_4, + 
.instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &tgl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - } }, { - .name = "PW_5", - .domain_list = &tgl_pwdoms_pw_5, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_5", &tgl_pwdoms_pw_5, + .hsw.idx = TGL_PW_CTL_IDX_PW_5), + ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_D), - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_PW_5, - }, }, }; @@ -1816,283 +1320,131 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); static const struct i915_power_well_desc xelpd_power_wells[] = { { - .name = "always-on", - .domain_list = &i9xx_pwdoms_always_on, + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, - .id = DISP_PW_ID_NONE, }, { - .name = "PW_1", /* Handled by the DMC firmware */ - .domain_list = I915_PW_DOMAINS_NONE, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - }, }, { - .name = "DC_off", - .domain_list = &xelpd_pwdoms_dc_off, + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &xelpd_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, }, { - .name = "PW_2", - .domain_list = &xelpd_pwdoms_pw_2, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &xelpd_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), .ops = &hsw_power_well_ops, .has_vga = true, .has_fuses = true, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - }, }, { - .name = "PW_A", - .domain_list = &xelpd_pwdoms_pw_a, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_A", &xelpd_pwdoms_pw_a, + .hsw.idx = XELPD_PW_CTL_IDX_PW_A), + ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_A), .has_fuses = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_A, - }, }, { - .name = "PW_B", - .domain_list = &xelpd_pwdoms_pw_b, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_B", &xelpd_pwdoms_pw_b, + .hsw.idx = XELPD_PW_CTL_IDX_PW_B), + ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_B, - }, }, { - .name = "PW_C", - .domain_list = &xelpd_pwdoms_pw_c, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_C", &xelpd_pwdoms_pw_c, + .hsw.idx = XELPD_PW_CTL_IDX_PW_C), + ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_C, - }, }, { - .name = "PW_D", - .domain_list = &xelpd_pwdoms_pw_d, + .instances = &I915_PW_INSTANCES( + I915_PW("PW_D", &xelpd_pwdoms_pw_d, + .hsw.idx = XELPD_PW_CTL_IDX_PW_D), + ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_D), .has_fuses = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_D, - }, }, { - .name = "DDI_IO_A", - .domain_list = &icl_pwdoms_ddi_io_a, + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &icl_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_D_XELPD", 
&xelpd_pwdoms_ddi_io_d_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D), + I915_PW("DDI_IO_E_XELPD", &xelpd_pwdoms_ddi_io_e_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E), + I915_PW("DDI_IO_TC1", &xelpd_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &xelpd_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + I915_PW("DDI_IO_TC3", &xelpd_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), + I915_PW("DDI_IO_TC4", &xelpd_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), + ), .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } }, { - .name = "DDI_IO_B", - .domain_list = &icl_pwdoms_ddi_io_b, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, { - .name = "DDI_IO_C", - .domain_list = &icl_pwdoms_ddi_io_c, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - } - }, { - .name = "DDI_IO_D_XELPD", - .domain_list = &xelpd_pwdoms_ddi_io_d_xelpd, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, - } - }, { - .name = "DDI_IO_E_XELPD", - .domain_list = &xelpd_pwdoms_ddi_io_e_xelpd, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, - } - }, { - .name = "DDI_IO_TC1", - .domain_list = &xelpd_pwdoms_ddi_io_tc1, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - } - }, { - .name = "DDI_IO_TC2", - .domain_list = &xelpd_pwdoms_ddi_io_tc2, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - } - }, { - .name = "DDI_IO_TC3", - .domain_list = &xelpd_pwdoms_ddi_io_tc3, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, - } - }, { - .name = "DDI_IO_TC4", - .domain_list = &xelpd_pwdoms_ddi_io_tc4, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, - } - }, { - .name = "AUX_A", - .domain_list = &icl_pwdoms_aux_a, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &tgl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_D_XELPD", &xelpd_pwdoms_aux_d_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D), + I915_PW("AUX_E_XELPD", &xelpd_pwdoms_aux_e_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E), + I915_PW("AUX_USBC1", &xelpd_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &xelpd_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + I915_PW("AUX_USBC3", &xelpd_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), + I915_PW("AUX_USBC4", &xelpd_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4), + ), .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, }, { - .name = "AUX_B", - .domain_list = &icl_pwdoms_aux_b, - .ops = &icl_aux_power_well_ops, - .fixed_enable_delay = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, { - .name = "AUX_C", - .domain_list = &tgl_pwdoms_aux_c, - .ops = &icl_aux_power_well_ops, - .fixed_enable_delay = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - }, - }, { - .name = "AUX_D_XELPD", - .domain_list = &xelpd_pwdoms_aux_d_xelpd, - .ops = &icl_aux_power_well_ops, - .fixed_enable_delay = true, - .id = DISP_PW_ID_NONE, - { - 
.hsw.idx = XELPD_PW_CTL_IDX_AUX_D, - }, - }, { - .name = "AUX_E_XELPD", - .domain_list = &xelpd_pwdoms_aux_e_xelpd, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, - }, - }, { - .name = "AUX_USBC1", - .domain_list = &xelpd_pwdoms_aux_usbc1, - .ops = &icl_aux_power_well_ops, - .fixed_enable_delay = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - }, - }, { - .name = "AUX_USBC2", - .domain_list = &xelpd_pwdoms_aux_usbc2, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - }, - }, { - .name = "AUX_USBC3", - .domain_list = &xelpd_pwdoms_aux_usbc3, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, - }, - }, { - .name = "AUX_USBC4", - .domain_list = &xelpd_pwdoms_aux_usbc4, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, - }, - }, { - .name = "AUX_TBT1", - .domain_list = &xelpd_pwdoms_aux_tbt1, + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_TBT1", &xelpd_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", &xelpd_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &xelpd_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &xelpd_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), + ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - }, - }, { - .name = "AUX_TBT2", - .domain_list = &xelpd_pwdoms_aux_tbt2, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - }, - }, { - .name = "AUX_TBT3", - .domain_list = &xelpd_pwdoms_aux_tbt3, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - }, - }, { - .name = "AUX_TBT4", - .domain_list = &xelpd_pwdoms_aux_tbt4, - .ops = &icl_aux_power_well_ops, - .is_tc_tbt = true, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, - }, }, }; -static void init_power_well_domains(const struct i915_power_well_desc *desc, +static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { int j; - if (!desc->domain_list) + if (!inst->domain_list) return; - if (desc->domain_list->count == 0) { + if (inst->domain_list->count == 0) { bitmap_fill(power_well->domains.bits, POWER_DOMAIN_NUM); return; } - for (j = 0; j < desc->domain_list->count; j++) - set_bit(desc->domain_list->list[j], power_well->domains.bits); + for (j = 0; j < inst->domain_list->count; j++) + set_bit(inst->domain_list->list[j], power_well->domains.bits); } +#define for_each_power_well_instance(_desc_list, _desc_count, _desc, _inst) \ + for ((_desc) = (_desc_list); (_desc) - (_desc_list) < (_desc_count); (_desc)++) \ + for ((_inst) = (_desc)->instances->list; \ + (_inst) - (_desc)->instances->list < (_desc)->instances->count; \ + (_inst)++) + static int __set_power_wells(struct i915_power_domains *power_domains, const struct i915_power_well_desc *power_well_descs, @@ -2102,11 +1454,13 @@ __set_power_wells(struct i915_power_domains *power_domains, struct drm_i915_private, power_domains); u64 power_well_ids = 0; + const struct i915_power_well_desc *desc; + const struct i915_power_well_instance *inst; int power_well_count = 0; - int i, plt_idx = 0; + int plt_idx = 0; - for (i = 0; i < power_well_descs_sz; i++) - if 
(!(BIT_ULL(power_well_descs[i].id) & skip_mask)) + for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc, inst) + if (!(BIT_ULL(inst->id) & skip_mask)) power_well_count++; power_domains->power_well_count = power_well_count; @@ -2117,16 +1471,19 @@ __set_power_wells(struct i915_power_domains *power_domains, if (!power_domains->power_wells) return -ENOMEM; - for (i = 0; i < power_well_descs_sz; i++) { - enum i915_power_well_id id = power_well_descs[i].id; + for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc, inst) { + struct i915_power_well *pw = &power_domains->power_wells[plt_idx]; + enum i915_power_well_id id = inst->id; if (BIT_ULL(id) & skip_mask) continue; - power_domains->power_wells[plt_idx].desc = - &power_well_descs[i]; + pw->desc = desc; + drm_WARN_ON(&i915->drm, + overflows_type(inst - desc->instances->list, pw->instance_idx)); + pw->instance_idx = inst - desc->instances->list; - init_power_well_domains(&power_well_descs[i], &power_domains->power_wells[plt_idx]); + init_power_well_domains(inst, pw); plt_idx++; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index b8f31d232cd1..000a25ed4a5c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -58,6 +58,12 @@ struct i915_power_well_ops { struct i915_power_well *power_well); }; +static const struct i915_power_well_instance * +i915_power_well_instance(const struct i915_power_well *power_well) +{ + return &power_well->desc->instances->list[power_well->instance_idx]; +} + struct i915_power_well * lookup_power_well(struct drm_i915_private *i915, enum i915_power_well_id power_well_id) @@ -65,7 +71,7 @@ lookup_power_well(struct drm_i915_private *i915, struct i915_power_well *power_well; for_each_power_well(i915, power_well) - if (power_well->desc->id == power_well_id) + if (i915_power_well_instance(power_well)->id == power_well_id) return power_well; /* @@ -84,7 +90,7 @@ lookup_power_well(struct drm_i915_private *i915, void intel_power_well_enable(struct drm_i915_private *i915, struct i915_power_well *power_well) { - drm_dbg_kms(&i915->drm, "enabling %s\n", power_well->desc->name); + drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well)); power_well->desc->ops->enable(i915, power_well); power_well->hw_enabled = true; } @@ -92,7 +98,7 @@ void intel_power_well_enable(struct drm_i915_private *i915, void intel_power_well_disable(struct drm_i915_private *i915, struct i915_power_well *power_well) { - drm_dbg_kms(&i915->drm, "disabling %s\n", power_well->desc->name); + drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well)); power_well->hw_enabled = false; power_well->desc->ops->disable(i915, power_well); } @@ -117,7 +123,7 @@ void intel_power_well_put(struct drm_i915_private *i915, { drm_WARN(&i915->drm, !power_well->count, "Use count on power well %s is already zero", - power_well->desc->name); + i915_power_well_instance(power_well)->name); if (!--power_well->count) intel_power_well_disable(i915, power_well); @@ -151,7 +157,7 @@ bool intel_power_well_is_always_on(struct i915_power_well *power_well) const char *intel_power_well_name(struct i915_power_well *power_well) { - return power_well->desc->name; + return i915_power_well_instance(power_well)->name; } struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well) @@ -195,7 +201,7 @@ static void 
hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) { - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : ICL_AUX_PW_TO_CH(pw_idx); @@ -242,7 +248,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, bool timeout_expected) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; /* * For some power wells we're not supposed to watch the status bit for @@ -285,7 +291,7 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; bool disabled; u32 reqs; @@ -323,7 +329,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; u32 val; if (power_well->desc->has_fuses) { @@ -370,7 +376,7 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; u32 val; hsw_power_well_pre_disable(dev_priv, @@ -387,7 +393,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); u32 val; @@ -419,7 +425,7 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); u32 val; @@ -505,7 +511,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, regs->driver); intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)); + val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); /* * An AUX timeout is expected if the TBT DP tunnel is down, @@ -521,7 +527,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { enum tc_port tc_port; - tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); + tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); @@ -583,8 +589,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - enum i915_power_well_id id = power_well->desc->id; - int pw_idx = power_well->desc->hsw.idx; + enum i915_power_well_id id 
= i915_power_well_instance(power_well)->id; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | HSW_PWR_WELL_CTL_STATE(pw_idx); u32 val; @@ -888,7 +894,7 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); u32 bios_req = intel_de_read(dev_priv, regs->bios); @@ -905,19 +911,19 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); + bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); + bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); + return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) @@ -926,18 +932,18 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { power_well = lookup_power_well(dev_priv, GLK_DISP_PW_DPIO_CMN_C); if (intel_power_well_refcount(power_well) > 0) bxt_ddi_phy_verify_state(dev_priv, - power_well->desc->bxt.phy); + i915_power_well_instance(power_well)->bxt.phy); } } @@ -1070,7 +1076,7 @@ static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, static void vlv_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { - int pw_idx = power_well->desc->vlv.idx; + int pw_idx = i915_power_well_instance(power_well)->vlv.idx; u32 mask; u32 state; u32 ctrl; @@ -1119,7 +1125,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv, static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - int pw_idx = power_well->desc->vlv.idx; + int pw_idx = i915_power_well_instance(power_well)->vlv.idx; bool enabled = false; u32 mask; u32 state; @@ -1413,15 +1419,16 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; enum pipe pipe; u32 tmp; drm_WARN_ON_ONCE(&dev_priv->drm, - power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - 
power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + id != VLV_DISP_PW_DPIO_CMN_BC && + id != CHV_DISP_PW_DPIO_CMN_D); - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + if (id == VLV_DISP_PW_DPIO_CMN_BC) { pipe = PIPE_A; phy = DPIO_PHY0; } else { @@ -1447,7 +1454,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + if (id == VLV_DISP_PW_DPIO_CMN_BC) { tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); tmp |= DPIO_DYNPWRDOWNEN_CH1; vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); @@ -1478,13 +1485,14 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; drm_WARN_ON_ONCE(&dev_priv->drm, - power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + id != VLV_DISP_PW_DPIO_CMN_BC && + id != CHV_DISP_PW_DPIO_CMN_D); - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + if (id == VLV_DISP_PW_DPIO_CMN_BC) { phy = DPIO_PHY0; assert_pll_disabled(dev_priv, PIPE_A); assert_pll_disabled(dev_priv, PIPE_B); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index bd60fb4166e7..d0624642dcb6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -33,7 +33,7 @@ struct i915_power_well; * wells must be assigned DISP_PW_ID_NONE. */ enum i915_power_well_id { - DISP_PW_ID_NONE, + DISP_PW_ID_NONE = 0, /* must be kept zero */ VLV_DISP_PW_DISP2D, BXT_DISP_PW_DPIO_CMN_A, @@ -49,29 +49,12 @@ enum i915_power_well_id { TGL_DISP_PW_TC_COLD_OFF, }; -struct i915_power_well_desc { +struct i915_power_well_instance { const char *name; const struct i915_power_domain_list { const enum intel_display_power_domain *list; u8 count; } *domain_list; - /* Mask of pipes whose IRQ logic is backed by the pw */ - u16 irq_pipe_mask:4; - u16 always_on:1; - /* - * Instead of waiting for the status bit to ack enables, - * just wait a specific amount of time and then consider - * the well enabled. - */ - u16 fixed_enable_delay:1; - /* The pw is backing the VGA functionality */ - u16 has_vga:1; - u16 has_fuses:1; - /* - * The pw is for an ICL+ TypeC PHY port in - * Thunderbolt mode. - */ - u16 is_tc_tbt:1; /* unique identifier for this power well */ enum i915_power_well_id id; @@ -98,7 +81,32 @@ struct i915_power_well_desc { u8 idx; } hsw; }; +}; + +struct i915_power_well_desc { const struct i915_power_well_ops *ops; + const struct i915_power_well_instance_list { + const struct i915_power_well_instance *list; + u8 count; + } *instances; + + /* Mask of pipes whose IRQ logic is backed by the pw */ + u16 irq_pipe_mask:4; + u16 always_on:1; + /* + * Instead of waiting for the status bit to ack enables, + * just wait a specific amount of time and then consider + * the well enabled. + */ + u16 fixed_enable_delay:1; + /* The pw is backing the VGA functionality */ + u16 has_vga:1; + u16 has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. 
+ */ + u16 is_tc_tbt:1; }; struct i915_power_well { @@ -108,6 +116,8 @@ struct i915_power_well { int count; /* cached hw enabled state */ bool hw_enabled; + /* index into desc->instances->list */ + u8 instance_idx; }; struct i915_power_well *lookup_power_well(struct drm_i915_private *i915, From a6394dbbe21e5966e688dde24cc8fa5d0b44b346 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:50 +0300 Subject: [PATCH 125/219] drm/i915: Allow platforms to share power well descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some power wells - like always-on and skl+/icl+ PW_1 - with the same name, domain list, flags, ops are used by multiple platforms, so allow platforms to reuse the descriptors of such power wells. This change also lets the follow up patches to simplify the DG1/RKL power well definitions, and remove the ADL-S skip_mask special casing. Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-11-imre.deak@intel.com --- .../i915/display/intel_display_power_map.c | 281 ++++++++---------- 1 file changed, 122 insertions(+), 159 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index c282e05bfc1a..7babe3f1a362 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -38,9 +38,17 @@ { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ } +struct i915_power_well_desc_list { + const struct i915_power_well_desc *list; + u8 count; +}; + +#define I915_PW_DESCRIPTORS(x) __LIST(x) + + I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL); -static const struct i915_power_well_desc i9xx_always_on_power_well[] = { +static const struct i915_power_well_desc i9xx_power_wells_always_on[] = { { .instances = &I915_PW_INSTANCES( I915_PW("always-on", &i9xx_pwdoms_always_on), @@ -50,6 +58,10 @@ static const struct i915_power_well_desc i9xx_always_on_power_well[] = { }, }; +static const struct i915_power_well_desc_list i9xx_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), +}; + I915_DECL_PW_DOMAINS(i830_pwdoms_pipes, POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_B, @@ -59,14 +71,8 @@ I915_DECL_PW_DOMAINS(i830_pwdoms_pipes, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc i830_power_wells[] = { +static const struct i915_power_well_desc i830_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("pipes", &i830_pwdoms_pipes), ), @@ -74,6 +80,11 @@ static const struct i915_power_well_desc i830_power_wells[] = { }, }; +static const struct i915_power_well_desc_list i830_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(i830_power_wells_main), +}; + I915_DECL_PW_DOMAINS(hsw_pwdoms_display, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, @@ -92,14 +103,8 @@ I915_DECL_PW_DOMAINS(hsw_pwdoms_display, POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc hsw_power_wells[] = { +static const struct i915_power_well_desc hsw_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { .instances = 
&I915_PW_INSTANCES( I915_PW("display", &hsw_pwdoms_display, .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, @@ -110,6 +115,11 @@ static const struct i915_power_well_desc hsw_power_wells[] = { }, }; +static const struct i915_power_well_desc_list hsw_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(hsw_power_wells_main), +}; + I915_DECL_PW_DOMAINS(bdw_pwdoms_display, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, @@ -127,14 +137,8 @@ I915_DECL_PW_DOMAINS(bdw_pwdoms_display, POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc bdw_power_wells[] = { +static const struct i915_power_well_desc bdw_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("display", &bdw_pwdoms_display, .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, @@ -146,6 +150,11 @@ static const struct i915_power_well_desc bdw_power_wells[] = { }, }; +static const struct i915_power_well_desc_list bdw_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(bdw_power_wells_main), +}; + I915_DECL_PW_DOMAINS(vlv_pwdoms_display, POWER_DOMAIN_DISPLAY_CORE, POWER_DOMAIN_PIPE_A, @@ -181,14 +190,8 @@ I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc vlv_power_wells[] = { +static const struct i915_power_well_desc vlv_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("display", &vlv_pwdoms_display, .vlv.idx = PUNIT_PWGT_IDX_DISP2D, @@ -217,6 +220,11 @@ static const struct i915_power_well_desc vlv_power_wells[] = { }, }; +static const struct i915_power_well_desc_list vlv_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(vlv_power_wells_main), +}; + I915_DECL_PW_DOMAINS(chv_pwdoms_display, POWER_DOMAIN_DISPLAY_CORE, POWER_DOMAIN_PIPE_A, @@ -253,14 +261,8 @@ I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d, POWER_DOMAIN_AUX_D, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc chv_power_wells[] = { +static const struct i915_power_well_desc chv_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { /* * Pipe A power well is the new disp2d well. Pipe B and C * power wells don't actually exist. 
Pipe A power well is @@ -283,6 +285,11 @@ static const struct i915_power_well_desc chv_power_wells[] = { }, }; +static const struct i915_power_well_desc_list chv_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(chv_power_wells_main), +}; + #define SKL_PW_2_POWER_DOMAINS \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_C, \ @@ -330,14 +337,8 @@ I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc skl_power_wells[] = { +static const struct i915_power_well_desc skl_power_wells_pw_1[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { /* Handled by the DMC firmware */ .instances = &I915_PW_INSTANCES( I915_PW("PW_1", I915_PW_DOMAINS_NONE, @@ -347,7 +348,11 @@ static const struct i915_power_well_desc skl_power_wells[] = { .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - }, { + }, +}; + +static const struct i915_power_well_desc skl_power_wells_main[] = { + { /* Handled by the DMC firmware */ .instances = &I915_PW_INSTANCES( I915_PW("MISC_IO", I915_PW_DOMAINS_NONE, @@ -383,6 +388,12 @@ static const struct i915_power_well_desc skl_power_wells[] = { }, }; +static const struct i915_power_well_desc_list skl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(skl_power_wells_pw_1), + I915_PW_DESCRIPTORS(skl_power_wells_main), +}; + #define BXT_PW_2_POWER_DOMAINS \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_C, \ @@ -423,24 +434,8 @@ I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc bxt_power_wells[] = { +static const struct i915_power_well_desc bxt_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { - /* Handled by the DMC firmware */ - .instances = &I915_PW_INSTANCES( - I915_PW("PW_1", I915_PW_DOMAINS_NONE, - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .id = SKL_DISP_PW_1), - ), - .ops = &hsw_power_well_ops, - .always_on = true, - .has_fuses = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &bxt_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), @@ -469,6 +464,12 @@ static const struct i915_power_well_desc bxt_power_wells[] = { }, }; +static const struct i915_power_well_desc_list bxt_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(skl_power_wells_pw_1), + I915_PW_DESCRIPTORS(bxt_power_wells_main), +}; + #define GLK_PW_2_POWER_DOMAINS \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_C, \ @@ -529,24 +530,8 @@ I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc glk_power_wells[] = { +static const struct i915_power_well_desc glk_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { - /* Handled by the DMC firmware */ - .instances = &I915_PW_INSTANCES( - I915_PW("PW_1", I915_PW_DOMAINS_NONE, - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .id = SKL_DISP_PW_1), - ), - .ops = &hsw_power_well_ops, - .always_on = true, - .has_fuses = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &glk_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), @@ -588,6 +573,12 @@ static const struct i915_power_well_desc glk_power_wells[] 
= { }, }; +static const struct i915_power_well_desc_list glk_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(skl_power_wells_pw_1), + I915_PW_DESCRIPTORS(glk_power_wells_main), +}; + /* * ICL PW_0/PG_0 domains (HW/DMC control): * - PCI @@ -682,14 +673,8 @@ I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT_D); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT_E); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT_F); -static const struct i915_power_well_desc icl_power_wells[] = { +static const struct i915_power_well_desc icl_power_wells_pw_1[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { /* Handled by the DMC firmware */ .instances = &I915_PW_INSTANCES( I915_PW("PW_1", I915_PW_DOMAINS_NONE, @@ -699,7 +684,11 @@ static const struct i915_power_well_desc icl_power_wells[] = { .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, - }, { + }, +}; + +static const struct i915_power_well_desc icl_power_wells_main[] = { + { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &icl_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), @@ -763,6 +752,12 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; +static const struct i915_power_well_desc_list icl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(icl_power_wells_main), +}; + #define TGL_PW_5_POWER_DOMAINS \ POWER_DOMAIN_PIPE_D, \ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \ @@ -868,24 +863,8 @@ I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off, POWER_DOMAIN_AUX_TBT6, POWER_DOMAIN_TC_COLD_OFF); -static const struct i915_power_well_desc tgl_power_wells[] = { +static const struct i915_power_well_desc tgl_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { - /* Handled by the DMC firmware */ - .instances = &I915_PW_INSTANCES( - I915_PW("PW_1", I915_PW_DOMAINS_NONE, - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .id = SKL_DISP_PW_1), - ), - .ops = &hsw_power_well_ops, - .always_on = true, - .has_fuses = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &tgl_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), @@ -971,6 +950,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }; +static const struct i915_power_well_desc_list tgl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(tgl_power_wells_main), +}; + #define RKL_PW_4_POWER_DOMAINS \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ @@ -1025,24 +1010,8 @@ I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off, POWER_DOMAIN_MODESET, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc rkl_power_wells[] = { +static const struct i915_power_well_desc rkl_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { - /* Handled by the DMC firmware */ - .instances = &I915_PW_INSTANCES( - I915_PW("PW_1", I915_PW_DOMAINS_NONE, - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .id = SKL_DISP_PW_1), - ), - .ops = &hsw_power_well_ops, - .always_on = true, - .has_fuses = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &rkl_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), @@ -1085,6 +1054,12 @@ 
static const struct i915_power_well_desc rkl_power_wells[] = { }, }; +static const struct i915_power_well_desc_list rkl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(rkl_power_wells_main), +}; + /* * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. */ @@ -1117,24 +1092,8 @@ I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2, POWER_DOMAIN_TRANSCODER_VDSC_PW2, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc dg1_power_wells[] = { +static const struct i915_power_well_desc dg1_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { - /* Handled by the DMC firmware */ - .instances = &I915_PW_INSTANCES( - I915_PW("PW_1", I915_PW_DOMAINS_NONE, - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .id = SKL_DISP_PW_1), - ), - .ops = &hsw_power_well_ops, - .always_on = true, - .has_fuses = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &dg1_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), @@ -1193,6 +1152,12 @@ static const struct i915_power_well_desc dg1_power_wells[] = { }, }; +static const struct i915_power_well_desc_list dg1_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(dg1_power_wells_main), +}; + /* * XE_LPD Power Domains * @@ -1318,24 +1283,8 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2); I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3); I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); -static const struct i915_power_well_desc xelpd_power_wells[] = { +static const struct i915_power_well_desc xelpd_power_wells_main[] = { { - .instances = &I915_PW_INSTANCES( - I915_PW("always-on", &i9xx_pwdoms_always_on), - ), - .ops = &i9xx_always_on_power_well_ops, - .always_on = true, - }, { - /* Handled by the DMC firmware */ - .instances = &I915_PW_INSTANCES( - I915_PW("PW_1", I915_PW_DOMAINS_NONE, - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .id = SKL_DISP_PW_1), - ), - .ops = &hsw_power_well_ops, - .always_on = true, - .has_fuses = true, - }, { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &xelpd_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), @@ -1421,6 +1370,12 @@ static const struct i915_power_well_desc xelpd_power_wells[] = { }, }; +static const struct i915_power_well_desc_list xelpd_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xelpd_power_wells_main), +}; + static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { @@ -1439,27 +1394,35 @@ static void init_power_well_domains(const struct i915_power_well_instance *inst, set_bit(inst->domain_list->list[j], power_well->domains.bits); } -#define for_each_power_well_instance(_desc_list, _desc_count, _desc, _inst) \ +#define for_each_power_well_instance_in_desc_list(_desc_list, _desc_count, _desc, _inst) \ for ((_desc) = (_desc_list); (_desc) - (_desc_list) < (_desc_count); (_desc)++) \ for ((_inst) = (_desc)->instances->list; \ (_inst) - (_desc)->instances->list < (_desc)->instances->count; \ (_inst)++) +#define for_each_power_well_instance(_desc_list, _desc_count, _descs, _desc, _inst) \ + for ((_descs) = (_desc_list); \ + (_descs) - (_desc_list) < (_desc_count); \ + (_descs)++) \ + for_each_power_well_instance_in_desc_list((_descs)->list, 
(_descs)->count, \ + (_desc), (_inst)) + static int __set_power_wells(struct i915_power_domains *power_domains, - const struct i915_power_well_desc *power_well_descs, + const struct i915_power_well_desc_list *power_well_descs, int power_well_descs_sz, u64 skip_mask) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); u64 power_well_ids = 0; + const struct i915_power_well_desc_list *desc_list; const struct i915_power_well_desc *desc; const struct i915_power_well_instance *inst; int power_well_count = 0; int plt_idx = 0; - for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc, inst) + for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) if (!(BIT_ULL(inst->id) & skip_mask)) power_well_count++; @@ -1471,7 +1434,7 @@ __set_power_wells(struct i915_power_domains *power_domains, if (!power_domains->power_wells) return -ENOMEM; - for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc, inst) { + for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) { struct i915_power_well *pw = &power_domains->power_wells[plt_idx]; enum i915_power_well_id id = inst->id; @@ -1557,7 +1520,7 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) else if (IS_I830(i915)) return set_power_wells(power_domains, i830_power_wells); else - return set_power_wells(power_domains, i9xx_always_on_power_well); + return set_power_wells(power_domains, i9xx_power_wells); } /** From 13344a9bdd38a8938dad385521ce601cecf4f4f7 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:51 +0300 Subject: [PATCH 126/219] drm/i915: Simplify the DG1 power well descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simplify the definition of DG1 power wells by reusing the identical RKL DDI/AUX descriptors. This reorders the DG1 DDI/AUX vs. PW4/5 power wells, but this shouldn't make a difference (it is the order on RKL and the DDI/AUX power wells don't have a dependency on PW4/5). 
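A note on the reordering mentioned above: power wells are created in exactly the order the descriptor sub-lists and their instances are visited. The loop below is an illustrative expansion of the for_each_power_well_instance() walk added two patches earlier; add_power_well() is a hypothetical stand-in for the body of __set_power_wells(), not a real driver function.

/*
 * Illustrative only: the resulting well order is simply the visiting
 * order, which is why placing rkl_power_wells_ddi_aux after
 * dg1_power_wells_main reorders DDI/AUX vs. PW_4/5 on DG1.
 */
static void visit_power_wells_sketch(const struct i915_power_well_desc_list *desc_list,
				     int desc_list_count)
{
	const struct i915_power_well_desc_list *descs;
	const struct i915_power_well_desc *desc;
	const struct i915_power_well_instance *inst;

	for (descs = desc_list; descs - desc_list < desc_list_count; descs++)
		for (desc = descs->list; desc - descs->list < descs->count; desc++)
			for (inst = desc->instances->list;
			     inst - desc->instances->list < desc->instances->count;
			     inst++)
				add_power_well(desc, inst); /* hypothetical helper */
}
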
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-12-imre.deak@intel.com --- .../i915/display/intel_display_power_map.c | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 7babe3f1a362..4443cf0015d1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1035,7 +1035,11 @@ static const struct i915_power_well_desc rkl_power_wells_main[] = { .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), - }, { + }, +}; + +static const struct i915_power_well_desc rkl_power_wells_ddi_aux[] = { + { .instances = &I915_PW_INSTANCES( I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), @@ -1058,6 +1062,7 @@ static const struct i915_power_well_desc_list rkl_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(rkl_power_wells_main), + I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux), }; /* @@ -1117,22 +1122,6 @@ static const struct i915_power_well_desc dg1_power_wells_main[] = { .irq_pipe_mask = BIT(PIPE_B), .has_vga = true, .has_fuses = true, - }, { - .instances = &I915_PW_INSTANCES( - I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), - I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), - I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), - I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), - ), - .ops = &icl_ddi_power_well_ops, - }, { - .instances = &I915_PW_INSTANCES( - I915_PW("AUX_A", &tgl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), - I915_PW("AUX_B", &tgl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), - I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), - I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), - ), - .ops = &icl_aux_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_4", &tgl_pwdoms_pw_4, @@ -1156,6 +1145,7 @@ static const struct i915_power_well_desc_list dg1_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(dg1_power_wells_main), + I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux), }; /* From eb6dedcc194a5bdbe7e47ad111771abb4ebdd2b8 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:52 +0300 Subject: [PATCH 127/219] drm/i915: Sanitize the ADL-S power well definition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of the skip_mask special casing of the ADL-S power well descriptors, add a power well descriptor list for ADL-S as well reusing the TGL descriptors, w/o the TC-cold power well. ADL-S doesn't have TypeC PHYs, so a better way would be having ADL-S specific AUX descriptors, but I left changing this for a follow-up. This changes the ordering of the AUX and TC-cold vs. PW_4/5 power wells on TGL and ADL-S, but this shouldn't make a difference (PW_4/5 don't depend on the AUX/TC-cold power wells). 
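For readability, the ADL-S change condenses to the before/after below (abridged from the hunks in this patch): runtime filtering through a skip mask is replaced by composing a descriptor list that simply omits the TC-cold sub-list.

/* Before (condensed): one shared TGL table, filtered at runtime by
 * masking out the TC-cold well ID. */
return set_power_wells_mask(power_domains, tgl_power_wells,
			    BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));

/* After (condensed): a dedicated ADL-S composition of shared sub-lists
 * that leaves out tgl_power_wells_tc_cold_off, so no filtering needed. */
static const struct i915_power_well_desc_list adls_power_wells[] = {
	I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
	I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
	I915_PW_DESCRIPTORS(tgl_power_wells_main),
	I915_PW_DESCRIPTORS(tgl_power_wells_aux),
};

return set_power_wells(power_domains, adls_power_wells);
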
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-13-imre.deak@intel.com --- .../i915/display/intel_display_power_map.c | 69 +++++++++++-------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 4443cf0015d1..86d937f8bfe1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -902,12 +902,36 @@ static const struct i915_power_well_desc tgl_power_wells_main[] = { ), .ops = &icl_ddi_power_well_ops, }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &tgl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_C), + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_5", &tgl_pwdoms_pw_5, + .hsw.idx = TGL_PW_CTL_IDX_PW_5), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_D), + }, +}; + +static const struct i915_power_well_desc tgl_power_wells_tc_cold_off[] = { + { .instances = &I915_PW_INSTANCES( I915_PW("TC_cold_off", &tgl_pwdoms_tc_cold_off, .id = TGL_DISP_PW_TC_COLD_OFF), ), .ops = &tgl_tc_cold_off_ops, - }, { + }, +}; + +static const struct i915_power_well_desc tgl_power_wells_aux[] = { + { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &tgl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &tgl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), @@ -931,22 +955,6 @@ static const struct i915_power_well_desc tgl_power_wells_main[] = { ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, - }, { - .instances = &I915_PW_INSTANCES( - I915_PW("PW_4", &tgl_pwdoms_pw_4, - .hsw.idx = ICL_PW_CTL_IDX_PW_4), - ), - .ops = &hsw_power_well_ops, - .has_fuses = true, - .irq_pipe_mask = BIT(PIPE_C), - }, { - .instances = &I915_PW_INSTANCES( - I915_PW("PW_5", &tgl_pwdoms_pw_5, - .hsw.idx = TGL_PW_CTL_IDX_PW_5), - ), - .ops = &hsw_power_well_ops, - .has_fuses = true, - .irq_pipe_mask = BIT(PIPE_D), }, }; @@ -954,6 +962,15 @@ static const struct i915_power_well_desc_list tgl_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(tgl_power_wells_main), + I915_PW_DESCRIPTORS(tgl_power_wells_tc_cold_off), + I915_PW_DESCRIPTORS(tgl_power_wells_aux), +}; + +static const struct i915_power_well_desc_list adls_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(tgl_power_wells_main), + I915_PW_DESCRIPTORS(tgl_power_wells_aux), }; #define RKL_PW_4_POWER_DOMAINS \ @@ -1400,7 +1417,7 @@ static void init_power_well_domains(const struct i915_power_well_instance *inst, static int __set_power_wells(struct i915_power_domains *power_domains, const struct i915_power_well_desc_list *power_well_descs, - int power_well_descs_sz, u64 skip_mask) + int power_well_descs_sz) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, @@ -1413,8 +1430,7 @@ __set_power_wells(struct i915_power_domains *power_domains, int plt_idx = 0; for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) - if (!(BIT_ULL(inst->id) & skip_mask)) - power_well_count++; + power_well_count++; power_domains->power_well_count = power_well_count; power_domains->power_wells = @@ -1428,9 +1444,6 @@ __set_power_wells(struct 
i915_power_domains *power_domains, struct i915_power_well *pw = &power_domains->power_wells[plt_idx]; enum i915_power_well_id id = inst->id; - if (BIT_ULL(id) & skip_mask) - continue; - pw->desc = desc; drm_WARN_ON(&i915->drm, overflows_type(inst - desc->instances->list, pw->instance_idx)); @@ -1451,12 +1464,9 @@ __set_power_wells(struct i915_power_domains *power_domains, return 0; } -#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ - __set_power_wells(power_domains, __power_well_descs, \ - ARRAY_SIZE(__power_well_descs), skip_mask) - #define set_power_wells(power_domains, __power_well_descs) \ - set_power_wells_mask(power_domains, __power_well_descs, 0) + __set_power_wells(power_domains, __power_well_descs, \ + ARRAY_SIZE(__power_well_descs)) /** * intel_display_power_map_init - initialize power domain -> power well mappings @@ -1485,8 +1495,7 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) else if (IS_DG1(i915)) return set_power_wells(power_domains, dg1_power_wells); else if (IS_ALDERLAKE_S(i915)) - return set_power_wells_mask(power_domains, tgl_power_wells, - BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); + return set_power_wells(power_domains, adls_power_wells); else if (IS_ROCKETLAKE(i915)) return set_power_wells(power_domains, rkl_power_wells); else if (DISPLAY_VER(i915) == 12) From 979e1b32e0e202197f182ec0abfadecbdd53b1ec Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:53 +0300 Subject: [PATCH 128/219] drm/i915: Sanitize the port -> DDI/AUX power domain mapping for each platform MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Atm the port -> DDI and AUX power domain mapping is specified by relying on the aliasing of the platform specific intel_display_power_domain enum values. For instance D12+ platforms refer to the 'D' port and power domain instances, which doesn't match the bspec terminology, on these platforms the corresponding port is TC1. To make it clear what port/domain the code refers to add a mapping between them which matches the bspec terms on different display versions. This also allows for removing the aliasing in enum values in a follow-up patch. v2: Add the functions to intel_display_power.c, use intel_display_power_ prefix. 
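The new mapping works by grouping contiguous port/AUX ranges per display version and deriving the domain as a base value plus the offset within the range. The sketch below mirrors the intel_display_power_ddi_io_domain() helper added in this patch (it is not new API); like the per-version tables, it assumes the domain enumerators within a range are consecutive.

/* Sketch of the range+offset lookup the intel_ddi_port_domains tables
 * rely on. E.g. on display 12, PORT_TC2 resolves to
 * POWER_DOMAIN_PORT_DDI_IO_TC1 + 1. */
static enum intel_display_power_domain
ddi_io_domain_sketch(const struct intel_ddi_port_domains *d, enum port port)
{
	if (port < d->port_start || port > d->port_end)
		return POWER_DOMAIN_INVALID; /* port not covered by this range */

	return d->ddi_io + port - d->port_start;
}
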
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-14-imre.deak@intel.com --- drivers/gpu/drm/i915/display/g4x_dp.c | 3 +- drivers/gpu/drm/i915/display/g4x_hdmi.c | 3 +- drivers/gpu/drm/i915/display/intel_ddi.c | 6 +- drivers/gpu/drm/i915/display/intel_display.c | 85 +------- drivers/gpu/drm/i915/display/intel_display.h | 4 +- .../drm/i915/display/intel_display_power.c | 206 ++++++++++++++++++ .../drm/i915/display/intel_display_power.h | 12 + drivers/gpu/drm/i915/display/intel_tc.c | 5 +- 8 files changed, 235 insertions(+), 89 deletions(-) diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 16bb21ad898b..5a957acebfd6 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -13,6 +13,7 @@ #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" +#include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" @@ -1375,7 +1376,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, dig_port->max_lanes = 4; intel_encoder->type = INTEL_OUTPUT_DP; - intel_encoder->power_domain = intel_port_to_power_domain(port); + intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port); if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) intel_encoder->pipe_mask = BIT(PIPE_C); diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 8bfef08b7c43..5fbd2ae95869 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -10,6 +10,7 @@ #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" +#include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" @@ -574,7 +575,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder->shutdown = intel_hdmi_encoder_shutdown; intel_encoder->type = INTEL_OUTPUT_HDMI; - intel_encoder->power_domain = intel_port_to_power_domain(port); + intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port); intel_encoder->port = port; if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 01463c4711d3..d9f238edf547 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -40,6 +40,7 @@ #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" +#include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" @@ -4364,7 +4365,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->get_power_domains = intel_ddi_get_power_domains; encoder->type = INTEL_OUTPUT_DDI; - encoder->power_domain = intel_port_to_power_domain(port); + encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port); encoder->port = port; encoder->cloneable = 0; encoder->pipe_mask = ~0; @@ -4492,8 +4493,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) } drm_WARN_ON(&dev_priv->drm, port > PORT_I); - dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_IO_A + - port - PORT_A; + dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port); if (init_dp) { if (!intel_ddi_init_dp_connector(dig_port)) diff --git 
a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 9cd35f4a25df..0ddfce21a828 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -51,6 +51,7 @@ #include "display/intel_crt.h" #include "display/intel_ddi.h" #include "display/intel_display_debugfs.h" +#include "display/intel_display_power.h" #include "display/intel_dp.h" #include "display/intel_dp_mst.h" #include "display/intel_dpll.h" @@ -2157,91 +2158,15 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) return TC_PORT_1 + port - PORT_C; } -enum intel_display_power_domain intel_port_to_power_domain(enum port port) -{ - switch (port) { - case PORT_A: - return POWER_DOMAIN_PORT_DDI_LANES_A; - case PORT_B: - return POWER_DOMAIN_PORT_DDI_LANES_B; - case PORT_C: - return POWER_DOMAIN_PORT_DDI_LANES_C; - case PORT_D: - return POWER_DOMAIN_PORT_DDI_LANES_D; - case PORT_E: - return POWER_DOMAIN_PORT_DDI_LANES_E; - case PORT_F: - return POWER_DOMAIN_PORT_DDI_LANES_F; - case PORT_G: - return POWER_DOMAIN_PORT_DDI_LANES_G; - case PORT_H: - return POWER_DOMAIN_PORT_DDI_LANES_H; - case PORT_I: - return POWER_DOMAIN_PORT_DDI_LANES_I; - default: - MISSING_CASE(port); - return POWER_DOMAIN_PORT_OTHER; - } -} - enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { - if (intel_tc_port_in_tbt_alt_mode(dig_port)) { - switch (dig_port->aux_ch) { - case AUX_CH_C: - return POWER_DOMAIN_AUX_TBT_C; - case AUX_CH_D: - return POWER_DOMAIN_AUX_TBT_D; - case AUX_CH_E: - return POWER_DOMAIN_AUX_TBT_E; - case AUX_CH_F: - return POWER_DOMAIN_AUX_TBT_F; - case AUX_CH_G: - return POWER_DOMAIN_AUX_TBT_G; - case AUX_CH_H: - return POWER_DOMAIN_AUX_TBT_H; - case AUX_CH_I: - return POWER_DOMAIN_AUX_TBT_I; - default: - MISSING_CASE(dig_port->aux_ch); - return POWER_DOMAIN_AUX_TBT_C; - } - } + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - return intel_legacy_aux_to_power_domain(dig_port->aux_ch); -} + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); -/* - * Converts aux_ch to power_domain without caring about TBT ports for that use - * intel_aux_power_domain() - */ -enum intel_display_power_domain -intel_legacy_aux_to_power_domain(enum aux_ch aux_ch) -{ - switch (aux_ch) { - case AUX_CH_A: - return POWER_DOMAIN_AUX_A; - case AUX_CH_B: - return POWER_DOMAIN_AUX_B; - case AUX_CH_C: - return POWER_DOMAIN_AUX_C; - case AUX_CH_D: - return POWER_DOMAIN_AUX_D; - case AUX_CH_E: - return POWER_DOMAIN_AUX_E; - case AUX_CH_F: - return POWER_DOMAIN_AUX_F; - case AUX_CH_G: - return POWER_DOMAIN_AUX_G; - case AUX_CH_H: - return POWER_DOMAIN_AUX_H; - case AUX_CH_I: - return POWER_DOMAIN_AUX_I; - default: - MISSING_CASE(aux_ch); - return POWER_DOMAIN_AUX_A; - } + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); } static void get_crtc_power_domains(struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 867fa248f042..187910d94ec6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -635,11 +635,9 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); -enum intel_display_power_domain 
intel_port_to_power_domain(enum port port); +enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port); enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port); -enum intel_display_power_domain -intel_legacy_aux_to_power_domain(enum aux_ch aux_ch); void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 69f66da00705..b417ca4e5c58 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -2256,3 +2256,209 @@ void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m mutex_unlock(&power_domains->lock); } + +struct intel_ddi_port_domains { + enum port port_start; + enum port port_end; + enum aux_ch aux_ch_start; + enum aux_ch aux_ch_end; + + enum intel_display_power_domain ddi_lanes; + enum intel_display_power_domain ddi_io; + enum intel_display_power_domain aux_legacy_usbc; + enum intel_display_power_domain aux_tbt; +}; + +static const struct intel_ddi_port_domains +i9xx_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_F, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_F, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, +}; + +static const struct intel_ddi_port_domains +d11_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_B, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_B, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, { + .port_start = PORT_C, + .port_end = PORT_F, + .aux_ch_start = AUX_CH_C, + .aux_ch_end = AUX_CH_F, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, + .aux_legacy_usbc = POWER_DOMAIN_AUX_C, + .aux_tbt = POWER_DOMAIN_AUX_TBT_C, + }, +}; + +static const struct intel_ddi_port_domains +d12_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_C, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_C, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, { + .port_start = PORT_TC1, + .port_end = PORT_TC6, + .aux_ch_start = AUX_CH_USBC1, + .aux_ch_end = AUX_CH_USBC6, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, + .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, + .aux_tbt = POWER_DOMAIN_AUX_TBT1, + }, +}; + +static const struct intel_ddi_port_domains +d13_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_C, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_C, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, { + .port_start = PORT_TC1, + .port_end = PORT_TC4, + .aux_ch_start = AUX_CH_USBC1, + .aux_ch_end = AUX_CH_USBC4, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, + .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, + .aux_tbt = POWER_DOMAIN_AUX_TBT1, + }, { + .port_start = PORT_D_XELPD, + .port_end = PORT_E_XELPD, + .aux_ch_start = 
AUX_CH_D_XELPD, + .aux_ch_end = AUX_CH_E_XELPD, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D_XELPD, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D_XELPD, + .aux_legacy_usbc = POWER_DOMAIN_AUX_D_XELPD, + .aux_tbt = POWER_DOMAIN_INVALID, + }, +}; + +static void +intel_port_domains_for_platform(struct drm_i915_private *i915, + const struct intel_ddi_port_domains **domains, + int *domains_size) +{ + if (DISPLAY_VER(i915) >= 13) { + *domains = d13_port_domains; + *domains_size = ARRAY_SIZE(d13_port_domains); + } else if (DISPLAY_VER(i915) >= 12) { + *domains = d12_port_domains; + *domains_size = ARRAY_SIZE(d12_port_domains); + } else if (DISPLAY_VER(i915) >= 11) { + *domains = d11_port_domains; + *domains_size = ARRAY_SIZE(d11_port_domains); + } else { + *domains = i9xx_port_domains; + *domains_size = ARRAY_SIZE(i9xx_port_domains); + } +} + +static const struct intel_ddi_port_domains * +intel_port_domains_for_port(struct drm_i915_private *i915, enum port port) +{ + const struct intel_ddi_port_domains *domains; + int domains_size; + int i; + + intel_port_domains_for_platform(i915, &domains, &domains_size); + for (i = 0; i < domains_size; i++) + if (port >= domains[i].port_start && port <= domains[i].port_end) + return &domains[i]; + + return NULL; +} + +enum intel_display_power_domain +intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_PORT_DDI_IO_A; + + return domains->ddi_io + port - domains->port_start; +} + +enum intel_display_power_domain +intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_PORT_DDI_LANES_A; + + return domains->ddi_lanes + port - domains->port_start; +} + +static const struct intel_ddi_port_domains * +intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch) +{ + const struct intel_ddi_port_domains *domains; + int domains_size; + int i; + + intel_port_domains_for_platform(i915, &domains, &domains_size); + for (i = 0; i < domains_size; i++) + if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) + return &domains[i]; + + return NULL; +} + +enum intel_display_power_domain +intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_AUX_A; + + return domains->aux_legacy_usbc + aux_ch - domains->aux_ch_start; +} + +enum intel_display_power_domain +intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_AUX_TBT1; + + return domains->aux_tbt + aux_ch - domains->aux_ch_start; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 66fef12ef3db..2ea30a4cfaa8 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ 
b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -8,8 +8,10 @@ #include "intel_runtime_pm.h" +enum aux_ch; enum dpio_channel; enum dpio_phy; +enum port; struct drm_i915_private; struct i915_power_well; struct intel_encoder; @@ -130,6 +132,7 @@ enum intel_display_power_domain { POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, + POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM, }; #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) @@ -266,6 +269,15 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915, void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m); +enum intel_display_power_domain +intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port); +enum intel_display_power_domain +intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port); +enum intel_display_power_domain +intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch); +enum intel_display_power_domain +intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch); + /* * FIXME: We should probably switch this to a 0-based scheme to be consistent * with how we now name/number DBUF_CTL instances. diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index fc037c027ea5..b8b822ea3755 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_display.h" +#include "intel_display_power_map.h" #include "intel_display_types.h" #include "intel_dp_mst.h" #include "intel_tc.h" @@ -61,10 +62,12 @@ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) static enum intel_display_power_domain tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port)) return POWER_DOMAIN_TC_COLD_OFF; - return intel_legacy_aux_to_power_domain(dig_port->aux_ch); + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); } static intel_wakeref_t From c97bbab02ad7d174b50541864bfa45cf1e4ee14f Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:54 +0300 Subject: [PATCH 129/219] drm/i915: Remove the aliasing of power domain enum values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Aliasing the intel_display_power_domain enum values was required because of the u64 power domain mask size limit. This makes the dmesg/debugfs printouts of the domain names somewhat unclear, for instance domain names for port D are shown on D12+ platforms where the corresponding port is called TC1. Make this clearer by removing the aliasing which is possible after a previous patch converting the mask to a bitmap. 
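For context, a minimal sketch of why the aliasing existed in the first place, using hypothetical names and only the generic kernel bitmap helpers (this is not code from the patch): a u64 mask can address at most 64 domains, so platforms past that limit had to reuse earlier enum slots, while a bitmap-backed set scales with the number of domains and lets every domain keep a distinct name.

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* Hypothetical example; the driver has its own mask structure and enum. */
#define EXAMPLE_POWER_DOMAIN_NUM 80	/* may exceed 64 once aliases are dropped */

struct example_power_domain_mask {
	DECLARE_BITMAP(bits, EXAMPLE_POWER_DOMAIN_NUM);
};

static inline void example_power_domain_mask_set(struct example_power_domain_mask *mask,
						 unsigned int domain)
{
	/*
	 * The old scheme was effectively "mask |= BIT_ULL(domain)", which only
	 * works for domain < 64 and is what forced the TC/XELPD domains to
	 * alias older enum values; a bitmap has no such limit, so the
	 * dmesg/debugfs printouts can use the real per-platform names.
	 */
	set_bit(domain, mask->bits);
}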
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-15-imre.deak@intel.com --- .../drm/i915/display/intel_display_power.c | 84 +++++++++++++------ .../drm/i915/display/intel_display_power.h | 26 ++---- 2 files changed, 67 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index b417ca4e5c58..f1f4d877a975 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -80,12 +80,22 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "PORT_DDI_LANES_E"; case POWER_DOMAIN_PORT_DDI_LANES_F: return "PORT_DDI_LANES_F"; - case POWER_DOMAIN_PORT_DDI_LANES_G: - return "PORT_DDI_LANES_G"; - case POWER_DOMAIN_PORT_DDI_LANES_H: - return "PORT_DDI_LANES_H"; - case POWER_DOMAIN_PORT_DDI_LANES_I: - return "PORT_DDI_LANES_I"; + case POWER_DOMAIN_PORT_DDI_LANES_TC1: + return "PORT_DDI_LANES_TC1"; + case POWER_DOMAIN_PORT_DDI_LANES_TC2: + return "PORT_DDI_LANES_TC2"; + case POWER_DOMAIN_PORT_DDI_LANES_TC3: + return "PORT_DDI_LANES_TC3"; + case POWER_DOMAIN_PORT_DDI_LANES_TC4: + return "PORT_DDI_LANES_TC4"; + case POWER_DOMAIN_PORT_DDI_LANES_TC5: + return "PORT_DDI_LANES_TC5"; + case POWER_DOMAIN_PORT_DDI_LANES_TC6: + return "PORT_DDI_LANES_TC6"; + case POWER_DOMAIN_PORT_DDI_LANES_D_XELPD: + return "PORT_DDI_LANES_D_XELPD"; + case POWER_DOMAIN_PORT_DDI_LANES_E_XELPD: + return "PORT_DDI_LANES_E_XELPD"; case POWER_DOMAIN_PORT_DDI_IO_A: return "PORT_DDI_IO_A"; case POWER_DOMAIN_PORT_DDI_IO_B: @@ -98,12 +108,22 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "PORT_DDI_IO_E"; case POWER_DOMAIN_PORT_DDI_IO_F: return "PORT_DDI_IO_F"; - case POWER_DOMAIN_PORT_DDI_IO_G: - return "PORT_DDI_IO_G"; - case POWER_DOMAIN_PORT_DDI_IO_H: - return "PORT_DDI_IO_H"; - case POWER_DOMAIN_PORT_DDI_IO_I: - return "PORT_DDI_IO_I"; + case POWER_DOMAIN_PORT_DDI_IO_TC1: + return "PORT_DDI_IO_TC1"; + case POWER_DOMAIN_PORT_DDI_IO_TC2: + return "PORT_DDI_IO_TC2"; + case POWER_DOMAIN_PORT_DDI_IO_TC3: + return "PORT_DDI_IO_TC3"; + case POWER_DOMAIN_PORT_DDI_IO_TC4: + return "PORT_DDI_IO_TC4"; + case POWER_DOMAIN_PORT_DDI_IO_TC5: + return "PORT_DDI_IO_TC5"; + case POWER_DOMAIN_PORT_DDI_IO_TC6: + return "PORT_DDI_IO_TC6"; + case POWER_DOMAIN_PORT_DDI_IO_D_XELPD: + return "PORT_DDI_IO_D_XELPD"; + case POWER_DOMAIN_PORT_DDI_IO_E_XELPD: + return "PORT_DDI_IO_E_XELPD"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -128,12 +148,22 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_E"; case POWER_DOMAIN_AUX_F: return "AUX_F"; - case POWER_DOMAIN_AUX_G: - return "AUX_G"; - case POWER_DOMAIN_AUX_H: - return "AUX_H"; - case POWER_DOMAIN_AUX_I: - return "AUX_I"; + case POWER_DOMAIN_AUX_USBC1: + return "AUX_USBC1"; + case POWER_DOMAIN_AUX_USBC2: + return "AUX_USBC2"; + case POWER_DOMAIN_AUX_USBC3: + return "AUX_USBC3"; + case POWER_DOMAIN_AUX_USBC4: + return "AUX_USBC4"; + case POWER_DOMAIN_AUX_USBC5: + return "AUX_USBC5"; + case POWER_DOMAIN_AUX_USBC6: + return "AUX_USBC6"; + case POWER_DOMAIN_AUX_D_XELPD: + return "AUX_D_XELPD"; + case POWER_DOMAIN_AUX_E_XELPD: + return "AUX_E_XELPD"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; case POWER_DOMAIN_AUX_TBT_C: @@ -144,12 +174,18 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_TBT_E"; case 
POWER_DOMAIN_AUX_TBT_F: return "AUX_TBT_F"; - case POWER_DOMAIN_AUX_TBT_G: - return "AUX_TBT_G"; - case POWER_DOMAIN_AUX_TBT_H: - return "AUX_TBT_H"; - case POWER_DOMAIN_AUX_TBT_I: - return "AUX_TBT_I"; + case POWER_DOMAIN_AUX_TBT1: + return "AUX_TBT1"; + case POWER_DOMAIN_AUX_TBT2: + return "AUX_TBT2"; + case POWER_DOMAIN_AUX_TBT3: + return "AUX_TBT3"; + case POWER_DOMAIN_AUX_TBT4: + return "AUX_TBT4"; + case POWER_DOMAIN_AUX_TBT5: + return "AUX_TBT5"; + case POWER_DOMAIN_AUX_TBT6: + return "AUX_TBT6"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 2ea30a4cfaa8..b58c5bada6d8 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -48,18 +48,15 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_LANES_D, POWER_DOMAIN_PORT_DDI_LANES_E, POWER_DOMAIN_PORT_DDI_LANES_F, - POWER_DOMAIN_PORT_DDI_LANES_G, - POWER_DOMAIN_PORT_DDI_LANES_H, - POWER_DOMAIN_PORT_DDI_LANES_I, - POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_LANES_D, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_LANES_TC1, POWER_DOMAIN_PORT_DDI_LANES_TC2, POWER_DOMAIN_PORT_DDI_LANES_TC3, POWER_DOMAIN_PORT_DDI_LANES_TC4, POWER_DOMAIN_PORT_DDI_LANES_TC5, POWER_DOMAIN_PORT_DDI_LANES_TC6, - POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */ + POWER_DOMAIN_PORT_DDI_LANES_D_XELPD, POWER_DOMAIN_PORT_DDI_LANES_E_XELPD, POWER_DOMAIN_PORT_DDI_IO_A, @@ -68,18 +65,15 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_IO_D, POWER_DOMAIN_PORT_DDI_IO_E, POWER_DOMAIN_PORT_DDI_IO_F, - POWER_DOMAIN_PORT_DDI_IO_G, - POWER_DOMAIN_PORT_DDI_IO_H, - POWER_DOMAIN_PORT_DDI_IO_I, - POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_IO_D, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_IO_TC1, POWER_DOMAIN_PORT_DDI_IO_TC2, POWER_DOMAIN_PORT_DDI_IO_TC3, POWER_DOMAIN_PORT_DDI_IO_TC4, POWER_DOMAIN_PORT_DDI_IO_TC5, POWER_DOMAIN_PORT_DDI_IO_TC6, - POWER_DOMAIN_PORT_DDI_IO_D_XELPD = POWER_DOMAIN_PORT_DDI_IO_TC5, /* XELPD */ + POWER_DOMAIN_PORT_DDI_IO_D_XELPD, POWER_DOMAIN_PORT_DDI_IO_E_XELPD, POWER_DOMAIN_PORT_DSI, @@ -94,18 +88,15 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, - POWER_DOMAIN_AUX_G, - POWER_DOMAIN_AUX_H, - POWER_DOMAIN_AUX_I, - POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */ + POWER_DOMAIN_AUX_USBC1, POWER_DOMAIN_AUX_USBC2, POWER_DOMAIN_AUX_USBC3, POWER_DOMAIN_AUX_USBC4, POWER_DOMAIN_AUX_USBC5, POWER_DOMAIN_AUX_USBC6, - POWER_DOMAIN_AUX_D_XELPD = POWER_DOMAIN_AUX_USBC5, /* XELPD */ + POWER_DOMAIN_AUX_D_XELPD, POWER_DOMAIN_AUX_E_XELPD, POWER_DOMAIN_AUX_IO_A, @@ -113,11 +104,8 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_TBT_D, POWER_DOMAIN_AUX_TBT_E, POWER_DOMAIN_AUX_TBT_F, - POWER_DOMAIN_AUX_TBT_G, - POWER_DOMAIN_AUX_TBT_H, - POWER_DOMAIN_AUX_TBT_I, - POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_TBT_D, /* tgl+ */ + POWER_DOMAIN_AUX_TBT1, POWER_DOMAIN_AUX_TBT2, POWER_DOMAIN_AUX_TBT3, POWER_DOMAIN_AUX_TBT4, From e20b77c14bc491fbaf7ad1bba4b40de6581cb8e0 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:55 +0300 Subject: [PATCH 130/219] drm/i915: Remove the ICL specific TBT power domains MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The spec calls the ICL TBT AUX power well instances TBT1-4 (similarly to all later platforms), align the power domain names with the spec. 
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-16-imre.deak@intel.com --- .../gpu/drm/i915/display/intel_display_power.c | 10 +--------- .../gpu/drm/i915/display/intel_display_power.h | 4 ---- .../drm/i915/display/intel_display_power_map.c | 16 ++++++++-------- 3 files changed, 9 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index f1f4d877a975..8b7b7a87df03 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -166,14 +166,6 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_E_XELPD"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; - case POWER_DOMAIN_AUX_TBT_C: - return "AUX_TBT_C"; - case POWER_DOMAIN_AUX_TBT_D: - return "AUX_TBT_D"; - case POWER_DOMAIN_AUX_TBT_E: - return "AUX_TBT_E"; - case POWER_DOMAIN_AUX_TBT_F: - return "AUX_TBT_F"; case POWER_DOMAIN_AUX_TBT1: return "AUX_TBT1"; case POWER_DOMAIN_AUX_TBT2: @@ -2341,7 +2333,7 @@ d11_port_domains[] = { .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, .aux_legacy_usbc = POWER_DOMAIN_AUX_C, - .aux_tbt = POWER_DOMAIN_AUX_TBT_C, + .aux_tbt = POWER_DOMAIN_AUX_TBT1, }, }; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index b58c5bada6d8..e04b2ff7b4b9 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -100,10 +100,6 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_E_XELPD, POWER_DOMAIN_AUX_IO_A, - POWER_DOMAIN_AUX_TBT_C, - POWER_DOMAIN_AUX_TBT_D, - POWER_DOMAIN_AUX_TBT_E, - POWER_DOMAIN_AUX_TBT_F, POWER_DOMAIN_AUX_TBT1, POWER_DOMAIN_AUX_TBT2, diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 86d937f8bfe1..d9cf3d3bc02e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -622,10 +622,10 @@ I915_DECL_PW_DOMAINS(icl_pwdoms_pw_4, POWER_DOMAIN_AUX_D, \ POWER_DOMAIN_AUX_E, \ POWER_DOMAIN_AUX_F, \ - POWER_DOMAIN_AUX_TBT_C, \ - POWER_DOMAIN_AUX_TBT_D, \ - POWER_DOMAIN_AUX_TBT_E, \ - POWER_DOMAIN_AUX_TBT_F + POWER_DOMAIN_AUX_TBT1, \ + POWER_DOMAIN_AUX_TBT2, \ + POWER_DOMAIN_AUX_TBT3, \ + POWER_DOMAIN_AUX_TBT4 I915_DECL_PW_DOMAINS(icl_pwdoms_pw_3, ICL_PW_3_POWER_DOMAINS, @@ -668,10 +668,10 @@ I915_DECL_PW_DOMAINS(icl_pwdoms_aux_c, POWER_DOMAIN_AUX_C); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_d, POWER_DOMAIN_AUX_D); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_e, POWER_DOMAIN_AUX_E); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_f, POWER_DOMAIN_AUX_F); -I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT_C); -I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT_D); -I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT_E); -I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT_F); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4); static const struct i915_power_well_desc icl_power_wells_pw_1[] = { { From 2431f38c17a6976e2826b1af552dc0ad0a417a3c Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 
00:06:56 +0300 Subject: [PATCH 131/219] drm/i915: Remove duplicate DDI/AUX power domain mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DDI and AUX domain -> power well mappings are identical for a few platforms/power well instances, reuse the mappings of earlier platforms for these removing the duplicate mapping of new platforms. Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-17-imre.deak@intel.com --- .../i915/display/intel_display_power_map.c | 89 +++++++------------ 1 file changed, 31 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index d9cf3d3bc02e..d647fb5da6b4 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -653,9 +653,6 @@ I915_DECL_PW_DOMAINS(icl_pwdoms_dc_off, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); -I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A); -I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B); -I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C); I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D); I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_e, POWER_DOMAIN_PORT_DDI_IO_E); I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_f, POWER_DOMAIN_PORT_DDI_IO_F); @@ -714,9 +711,9 @@ static const struct i915_power_well_desc icl_power_wells_main[] = { .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( - I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), - I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), - I915_PW("DDI_IO_C", &icl_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = ICL_PW_CTL_IDX_DDI_D), I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = ICL_PW_CTL_IDX_DDI_E), I915_PW("DDI_IO_F", &icl_pwdoms_ddi_io_f, .hsw.idx = ICL_PW_CTL_IDX_DDI_F), @@ -828,12 +825,6 @@ I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc5, POWER_DOMAIN_PORT_DDI_IO_TC5); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc6, POWER_DOMAIN_PORT_DDI_IO_TC6); -I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_a, - POWER_DOMAIN_AUX_A, - POWER_DOMAIN_AUX_IO_A); -I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_b, POWER_DOMAIN_AUX_B); -I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_c, POWER_DOMAIN_AUX_C); - I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3); @@ -841,10 +832,6 @@ I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc5, POWER_DOMAIN_AUX_USBC5); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc6, POWER_DOMAIN_AUX_USBC6); -I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1); -I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2); -I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3); -I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt5, POWER_DOMAIN_AUX_TBT5); 
I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt6, POWER_DOMAIN_AUX_TBT6); @@ -890,9 +877,9 @@ static const struct i915_power_well_desc tgl_power_wells_main[] = { .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( - I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), - I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), - I915_PW("DDI_IO_C", &icl_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), @@ -933,9 +920,9 @@ static const struct i915_power_well_desc tgl_power_wells_tc_cold_off[] = { static const struct i915_power_well_desc tgl_power_wells_aux[] = { { .instances = &I915_PW_INSTANCES( - I915_PW("AUX_A", &tgl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), - I915_PW("AUX_B", &tgl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), - I915_PW("AUX_C", &tgl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), @@ -946,10 +933,10 @@ static const struct i915_power_well_desc tgl_power_wells_aux[] = { .ops = &icl_aux_power_well_ops, }, { .instances = &I915_PW_INSTANCES( - I915_PW("AUX_TBT1", &tgl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), - I915_PW("AUX_TBT2", &tgl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), - I915_PW("AUX_TBT3", &tgl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), - I915_PW("AUX_TBT4", &tgl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), + I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), I915_PW("AUX_TBT5", &tgl_pwdoms_aux_tbt5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5), I915_PW("AUX_TBT6", &tgl_pwdoms_aux_tbt6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6), ), @@ -1058,8 +1045,8 @@ static const struct i915_power_well_desc rkl_power_wells_main[] = { static const struct i915_power_well_desc rkl_power_wells_ddi_aux[] = { { .instances = &I915_PW_INSTANCES( - I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), - I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), ), @@ -1273,22 +1260,8 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off, I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_d_xelpd, 
POWER_DOMAIN_AUX_D_XELPD); I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_e_xelpd, POWER_DOMAIN_AUX_E_XELPD); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4); - -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4); - I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_d_xelpd, POWER_DOMAIN_PORT_DDI_IO_D_XELPD); I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_e_xelpd, POWER_DOMAIN_PORT_DDI_IO_E_XELPD); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); static const struct i915_power_well_desc xelpd_power_wells_main[] = { { @@ -1340,37 +1313,37 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = { .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( - I915_PW("DDI_IO_A", &icl_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), - I915_PW("DDI_IO_B", &icl_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), - I915_PW("DDI_IO_C", &icl_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), I915_PW("DDI_IO_D_XELPD", &xelpd_pwdoms_ddi_io_d_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D), I915_PW("DDI_IO_E_XELPD", &xelpd_pwdoms_ddi_io_e_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E), - I915_PW("DDI_IO_TC1", &xelpd_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), - I915_PW("DDI_IO_TC2", &xelpd_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), - I915_PW("DDI_IO_TC3", &xelpd_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), - I915_PW("DDI_IO_TC4", &xelpd_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), + I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), + I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), ), .ops = &icl_ddi_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), - I915_PW("AUX_C", &tgl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), I915_PW("AUX_D_XELPD", &xelpd_pwdoms_aux_d_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D), I915_PW("AUX_E_XELPD", &xelpd_pwdoms_aux_e_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E), - I915_PW("AUX_USBC1", &xelpd_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), - I915_PW("AUX_USBC2", &xelpd_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), - I915_PW("AUX_USBC3", &xelpd_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), - I915_PW("AUX_USBC4", &xelpd_pwdoms_aux_usbc4, .hsw.idx = 
TGL_PW_CTL_IDX_AUX_TC4), + I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), + I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4), ), .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, }, { .instances = &I915_PW_INSTANCES( - I915_PW("AUX_TBT1", &xelpd_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), - I915_PW("AUX_TBT2", &xelpd_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), - I915_PW("AUX_TBT3", &xelpd_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), - I915_PW("AUX_TBT4", &xelpd_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), + I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, From 799da9837d1e9c514da0e67c1f1d23f7e49f45c4 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 15 Apr 2022 00:06:57 +0300 Subject: [PATCH 132/219] drm/i915: Remove the XELPD specific AUX and DDI power domains MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The spec calls the XELPD_D/E ports just D/E, the platform prefix in the domain names was only needed by the port->domain mapping relying on matching enum values for the whole port/domain range (and the corresponding aliasing between the platform specific domain enums). Since a previous patch we can define the port->domain mapping explicitly so do this by reusing the already existing D/E power domain names. 
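The "explicit mapping" referred to above is the range-table lookup added earlier in the series (intel_ddi_port_domains in intel_display_power.c). Sketched here with hypothetical, simplified names, the idea is that each platform lists contiguous port ranges together with the first domain of each range, instead of relying on the port and domain enums lining up across all platforms:

/* Simplified sketch of the range-table lookup; not the driver's real types. */
struct example_port_domains {
	int port_start, port_end;	/* inclusive port range */
	int ddi_io;			/* domain of the first port in the range */
};

static int example_port_to_ddi_io_domain(const struct example_port_domains *tbl,
					 int num_entries, int port)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (port >= tbl[i].port_start && port <= tbl[i].port_end)
			return tbl[i].ddi_io + (port - tbl[i].port_start);

	return -1;	/* unmapped port; the real code warns and falls back */
}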
Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220414210657.1785773-18-imre.deak@intel.com --- .../drm/i915/display/intel_display_power.c | 18 +++------------ .../drm/i915/display/intel_display_power.h | 9 -------- .../i915/display/intel_display_power_map.c | 22 +++++++------------ 3 files changed, 11 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 8b7b7a87df03..1d9bd5808849 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -92,10 +92,6 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "PORT_DDI_LANES_TC5"; case POWER_DOMAIN_PORT_DDI_LANES_TC6: return "PORT_DDI_LANES_TC6"; - case POWER_DOMAIN_PORT_DDI_LANES_D_XELPD: - return "PORT_DDI_LANES_D_XELPD"; - case POWER_DOMAIN_PORT_DDI_LANES_E_XELPD: - return "PORT_DDI_LANES_E_XELPD"; case POWER_DOMAIN_PORT_DDI_IO_A: return "PORT_DDI_IO_A"; case POWER_DOMAIN_PORT_DDI_IO_B: @@ -120,10 +116,6 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "PORT_DDI_IO_TC5"; case POWER_DOMAIN_PORT_DDI_IO_TC6: return "PORT_DDI_IO_TC6"; - case POWER_DOMAIN_PORT_DDI_IO_D_XELPD: - return "PORT_DDI_IO_D_XELPD"; - case POWER_DOMAIN_PORT_DDI_IO_E_XELPD: - return "PORT_DDI_IO_E_XELPD"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -160,10 +152,6 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_USBC5"; case POWER_DOMAIN_AUX_USBC6: return "AUX_USBC6"; - case POWER_DOMAIN_AUX_D_XELPD: - return "AUX_D_XELPD"; - case POWER_DOMAIN_AUX_E_XELPD: - return "AUX_E_XELPD"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; case POWER_DOMAIN_AUX_TBT1: @@ -2390,9 +2378,9 @@ d13_port_domains[] = { .aux_ch_start = AUX_CH_D_XELPD, .aux_ch_end = AUX_CH_E_XELPD, - .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D_XELPD, - .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D_XELPD, - .aux_legacy_usbc = POWER_DOMAIN_AUX_D_XELPD, + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, + .aux_legacy_usbc = POWER_DOMAIN_AUX_D, .aux_tbt = POWER_DOMAIN_INVALID, }, }; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index e04b2ff7b4b9..7136ea3f233e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -56,9 +56,6 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_LANES_TC5, POWER_DOMAIN_PORT_DDI_LANES_TC6, - POWER_DOMAIN_PORT_DDI_LANES_D_XELPD, - POWER_DOMAIN_PORT_DDI_LANES_E_XELPD, - POWER_DOMAIN_PORT_DDI_IO_A, POWER_DOMAIN_PORT_DDI_IO_B, POWER_DOMAIN_PORT_DDI_IO_C, @@ -73,9 +70,6 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_IO_TC5, POWER_DOMAIN_PORT_DDI_IO_TC6, - POWER_DOMAIN_PORT_DDI_IO_D_XELPD, - POWER_DOMAIN_PORT_DDI_IO_E_XELPD, - POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -96,9 +90,6 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_USBC5, POWER_DOMAIN_AUX_USBC6, - POWER_DOMAIN_AUX_D_XELPD, - POWER_DOMAIN_AUX_E_XELPD, - POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_TBT1, diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index d647fb5da6b4..af6f54a26a35 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ 
b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1207,8 +1207,8 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, XELPD_PW_C_POWER_DOMAINS, \ XELPD_PW_D_POWER_DOMAINS, \ POWER_DOMAIN_PORT_DDI_LANES_C, \ - POWER_DOMAIN_PORT_DDI_LANES_D_XELPD, \ - POWER_DOMAIN_PORT_DDI_LANES_E_XELPD, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_PORT_DDI_LANES_TC3, \ @@ -1216,8 +1216,8 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_C, \ - POWER_DOMAIN_AUX_D_XELPD, \ - POWER_DOMAIN_AUX_E_XELPD, \ + POWER_DOMAIN_AUX_D, \ + POWER_DOMAIN_AUX_E, \ POWER_DOMAIN_AUX_USBC1, \ POWER_DOMAIN_AUX_USBC2, \ POWER_DOMAIN_AUX_USBC3, \ @@ -1257,12 +1257,6 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off, POWER_DOMAIN_MODESET, POWER_DOMAIN_INIT); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_d_xelpd, POWER_DOMAIN_AUX_D_XELPD); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_aux_e_xelpd, POWER_DOMAIN_AUX_E_XELPD); - -I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_d_xelpd, POWER_DOMAIN_PORT_DDI_IO_D_XELPD); -I915_DECL_PW_DOMAINS(xelpd_pwdoms_ddi_io_e_xelpd, POWER_DOMAIN_PORT_DDI_IO_E_XELPD); - static const struct i915_power_well_desc xelpd_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( @@ -1316,8 +1310,8 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = { I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), - I915_PW("DDI_IO_D_XELPD", &xelpd_pwdoms_ddi_io_d_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D), - I915_PW("DDI_IO_E_XELPD", &xelpd_pwdoms_ddi_io_e_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E), + I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D), + I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E), I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), @@ -1329,8 +1323,8 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = { I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), - I915_PW("AUX_D_XELPD", &xelpd_pwdoms_aux_d_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D), - I915_PW("AUX_E_XELPD", &xelpd_pwdoms_aux_e_xelpd, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E), + I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D), + I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E), I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), From f5b2cd89d539aa8e16e620eb2d52c905461776bd Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 20 Apr 2022 21:05:41 +0300 Subject: [PATCH 133/219] drm/i915: Fixup merge of the power well refactor patchset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The wrong v2 version of drm/i915: Move per-platform power well hooks to intel_display_power_well.c patch was pushed to drm-intel-next branch instead of 
v3, fix this up applying the difference between v2 and v3. Signed-off-by: Imre Deak Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20220415082524.1826924-1-imre.deak@intel.com --- .../i915/display/intel_display_power_well.c | 20 +++---------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 000a25ed4a5c..5be18eb94042 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -11,15 +11,15 @@ #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_types.h" +#include "intel_dmc.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" -#include "intel_dmc.h" #include "intel_hotplug.h" #include "intel_pcode.h" #include "intel_pm.h" #include "intel_pps.h" -#include "intel_vga.h" #include "intel_tc.h" +#include "intel_vga.h" #include "vlv_sideband.h" #include "vlv_sideband_reg.h" @@ -538,18 +538,6 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, } } -static void -icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); - struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); - - icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); - - hsw_power_well_disable(dev_priv, power_well); -} - static void icl_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) @@ -572,7 +560,7 @@ icl_aux_power_well_disable(struct drm_i915_private *dev_priv, enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); if (intel_phy_is_tc(dev_priv, phy)) - return icl_tc_phy_aux_power_well_disable(dev_priv, power_well); + return hsw_power_well_disable(dev_priv, power_well); else if (IS_ICELAKE(dev_priv)) return icl_combo_phy_aux_power_well_disable(dev_priv, power_well); @@ -1306,8 +1294,6 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, false); } -#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) - #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) static void assert_chv_phy_status(struct drm_i915_private *dev_priv) From 8cd9efd1b7fb329681d5a5fdf441c8fc8dc4f5ae Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 11 Apr 2022 23:35:22 -0400 Subject: [PATCH 134/219] drm/ttm: Add common debugfs code for resource managers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drivers duplicate the code required to add debugfs entries for various ttm resource managers. To fix it add common TTM resource manager debugfs code that each driver can reuse. Specific resource managers can overwrite ttm_resource_manager_func::debug to get more information from those debugfs entries. 
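As a rough illustration of that override point (the driver-side names below are made up; only ttm_resource_manager_func::debug, drm_printf() and ttm_resource_manager_debug() are taken from TTM), a manager can add its own lines to the debugfs dump like so:

#include <linux/container_of.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical driver-private manager wrapping the common TTM one. */
struct example_manager {
	struct ttm_resource_manager manager;
	unsigned long pool_pages;
};

static void example_manager_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct example_manager *emgr = container_of(man, struct example_manager, manager);

	drm_printf(printer, "example pool pages: %lu\n", emgr->pool_pages);
}

static const struct ttm_resource_manager_func example_manager_func = {
	/* .alloc and .free left out of this sketch */
	.debug = example_manager_debug,	/* picked up by ttm_resource_manager_debug() */
};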
Signed-off-by: Zack Rusin Cc: Huang Rui Cc: David Airlie Cc: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220412033526.369115-2-zack@kde.org Reviewed-by: Christian König --- drivers/gpu/drm/ttm/ttm_resource.c | 34 ++++++++++++++++++++++++++++++ include/drm/ttm/ttm_resource.h | 4 ++++ 2 files changed, 38 insertions(+) diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 7d5a438180e9..65889b3caf50 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -644,3 +644,37 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, ttm_mem_io_free(bdev, mem); } + +#if defined(CONFIG_DEBUG_FS) + +static int ttm_resource_manager_show(struct seq_file *m, void *unused) +{ + struct ttm_resource_manager *man = + (struct ttm_resource_manager *)m->private; + struct drm_printer p = drm_seq_file_printer(m); + ttm_resource_manager_debug(man, &p); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager); + +#endif + +/** + * ttm_resource_manager_create_debugfs - Create debugfs entry for specified + * resource manager. + * @man: The TTM resource manager for which the debugfs stats file be creates + * @parent: debugfs directory in which the file will reside + * @name: The filename to create. + * + * This function setups up a debugfs file that can be used to look + * at debug statistics of the specified ttm_resource_manager. + */ +void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man, + struct dentry * parent, + const char *name) +{ +#if defined(CONFIG_DEBUG_FS) + debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops); +#endif +} +EXPORT_SYMBOL(ttm_resource_manager_create_debugfs); diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index f82fee18c07f..441653693970 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -381,4 +381,8 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, struct ttm_device *bdev, struct ttm_resource *mem); + +void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man, + struct dentry * parent, + const char *name); #endif From af4a25bbe5e7e60ff696ef5c1ec48ab2d51c17c6 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 11 Apr 2022 23:35:23 -0400 Subject: [PATCH 135/219] drm/vmwgfx: Add debugfs entries for various ttm resource managers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the newly added TTM's ability to automatically create debugfs entries for specified placements. This creates debugfs files that can be read to get information about various TTM resource managers which are used by vmwgfx. 
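The general call pattern behind this, sketched with a hypothetical device struct (the actual vmwgfx hook-up follows in the hunk below), is one ttm_resource_manager_create_debugfs() call per manager from the driver's debugfs init path; the named file then appears under the DRM debugfs root, typically /sys/kernel/debug/dri/<minor>/:

#include <linux/dcache.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical driver embedding a DRM device and a TTM device. */
struct example_device {
	struct drm_device drm;
	struct ttm_device bdev;
};

static void example_debugfs_init(struct example_device *edev)
{
	struct dentry *root = edev->drm.primary->debugfs_root;

	/* Each manager gets its own read-only stats file. */
	ttm_resource_manager_create_debugfs(ttm_manager_type(&edev->bdev, TTM_PL_VRAM),
					    root, "vram_ttm");
}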
Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20220412033526.369115-3-zack@kde.org Reviewed-by: Christian König --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 791f9a5f3868..ffd975c14136 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1385,6 +1385,23 @@ static void vmw_remove(struct pci_dev *pdev) vmw_driver_unload(dev); } +static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw) +{ + struct drm_minor *minor = vmw->drm.primary; + struct dentry *root = minor->debugfs_root; + + ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM), + root, "system_ttm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM), + root, "vram_ttm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR), + root, "gmr_ttm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB), + root, "mob_ttm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM), + root, "system_mob_ttm"); +} + static unsigned long vmw_get_unmapped_area(struct file *file, unsigned long uaddr, unsigned long len, unsigned long pgoff, @@ -1632,6 +1649,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_unload; vmw_debugfs_gem_init(vmw); + vmw_debugfs_resource_managers_init(vmw); return 0; out_unload: From 7212d24cec522752dcd5a5f6db9e45c3d87df269 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 11 Apr 2022 23:35:24 -0400 Subject: [PATCH 136/219] drm/amdgpu: Use TTM builtin resource manager debugfs code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch to using the TTM resource manager debugfs helpers. It's exactly the same functionality but the debugfs code is shared with other drivers. The TTM resource managers need to stay valid for as long as the drm debugfs_root is valid. 
Signed-off-by: Zack Rusin Cc: Alex Deucher Cc: "Pan, Xinhui" Cc: David Airlie Cc: Daniel Vetter Cc: Felix Kuehling Cc: Nirmoy Das Cc: Thomas Zimmermann Cc: amd-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20220412033526.369115-4-zack@kde.org Reviewed-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 86 +++++-------------------- 1 file changed, 16 insertions(+), 70 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 49ffad312d5d..ec26edd4f4d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2161,17 +2161,6 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) #if defined(CONFIG_DEBUG_FS) -static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - TTM_PL_VRAM); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; @@ -2179,55 +2168,6 @@ static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) return ttm_pool_debugfs(&adev->mman.bdev.pool, m); } -static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - TTM_PL_TT); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_GDS); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_GWS); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_OA); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table); DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool); /* @@ -2437,17 +2377,23 @@ void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size); debugfs_create_file("amdgpu_iomem", 0444, root, adev, &amdgpu_ttm_iomem_fops); - debugfs_create_file("amdgpu_vram_mm", 0444, root, adev, - &amdgpu_mm_vram_table_fops); - debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev, - &amdgpu_mm_tt_table_fops); - debugfs_create_file("amdgpu_gds_mm", 0444, root, adev, - &amdgpu_mm_gds_table_fops); - debugfs_create_file("amdgpu_gws_mm", 0444, root, adev, - 
&amdgpu_mm_gws_table_fops); - debugfs_create_file("amdgpu_oa_mm", 0444, root, adev, - &amdgpu_mm_oa_table_fops); debugfs_create_file("ttm_page_pool", 0444, root, adev, &amdgpu_ttm_page_pool_fops); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + TTM_PL_VRAM), + root, "amdgpu_vram_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + TTM_PL_TT), + root, "amdgpu_gtt_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GDS), + root, "amdgpu_gds_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GWS), + root, "amdgpu_gws_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_OA), + root, "amdgpu_oa_mm"); + #endif } From d0719e09264b2d79f8262a3e9c5d870d29c7b800 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 11 Apr 2022 23:35:25 -0400 Subject: [PATCH 137/219] drm/qxl: Use TTM builtin resource manager debugfs code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch to using the TTM resource manager debugfs helpers. The functionality is largely the same. The TTM resource managers need to stay valid for as long as the drm debugfs_root is valid. Signed-off-by: Zack Rusin Cc: Dave Airlie Cc: Gerd Hoffmann Cc: Daniel Vetter Cc: virtualization@lists.linux-foundation.org Cc: spice-devel@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20220412033526.369115-5-zack@kde.org Reviewed-by: Christian König --- drivers/gpu/drm/qxl/qxl_ttm.c | 39 ++++++----------------------------- 1 file changed, 6 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 9ba871bd19b1..ee95001e6b5e 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -222,41 +222,14 @@ void qxl_ttm_fini(struct qxl_device *qdev) DRM_INFO("qxl: ttm finalized\n"); } -#define QXL_DEBUGFS_MEM_TYPES 2 - -#if defined(CONFIG_DEBUG_FS) -static int qxl_mm_dump_table(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct ttm_resource_manager *man = (struct ttm_resource_manager *)node->info_ent->data; - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} -#endif - void qxl_ttm_debugfs_init(struct qxl_device *qdev) { #if defined(CONFIG_DEBUG_FS) - static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; - static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32]; - unsigned int i; - - for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) { - if (i == 0) - sprintf(qxl_mem_types_names[i], "qxl_mem_mm"); - else - sprintf(qxl_mem_types_names[i], "qxl_surf_mm"); - qxl_mem_types_list[i].name = qxl_mem_types_names[i]; - qxl_mem_types_list[i].show = &qxl_mm_dump_table; - qxl_mem_types_list[i].driver_features = 0; - if (i == 0) - qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM); - else - qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV); - - } - qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); + ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev, + TTM_PL_VRAM), + qdev->ddev.primary->debugfs_root, "qxl_mem_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev, + TTM_PL_PRIV), + qdev->ddev.primary->debugfs_root, "qxl_surf_mm"); #endif } From 40d8d4bd06720aed6c1125bab7296c57de4f1157 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 11 
Apr 2022 23:35:26 -0400 Subject: [PATCH 138/219] drm/radeon: Use TTM builtin resource manager debugfs code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch to using the TTM resource manager debugfs helpers. The functionality is largely the same. The TTM resource managers need to stay valid for as long as the drm debugfs_root is valid. Signed-off-by: Zack Rusin Cc: Alex Deucher Cc: "Pan, Xinhui" Cc: David Airlie Cc: Daniel Vetter Cc: amd-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20220412033526.369115-6-zack@kde.org Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_ttm.c | 36 +++++------------------------ 1 file changed, 6 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 44594d16611f..d33fec488713 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -781,17 +781,6 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) #if defined(CONFIG_DEBUG_FS) -static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused) -{ - struct radeon_device *rdev = (struct radeon_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, - TTM_PL_VRAM); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - static int radeon_ttm_page_pool_show(struct seq_file *m, void *data) { struct radeon_device *rdev = (struct radeon_device *)m->private; @@ -799,19 +788,6 @@ static int radeon_ttm_page_pool_show(struct seq_file *m, void *data) return ttm_pool_debugfs(&rdev->mman.bdev.pool, m); } -static int radeon_mm_gtt_dump_table_show(struct seq_file *m, void *unused) -{ - struct radeon_device *rdev = (struct radeon_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, - TTM_PL_TT); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(radeon_mm_vram_dump_table); -DEFINE_SHOW_ATTRIBUTE(radeon_mm_gtt_dump_table); DEFINE_SHOW_ATTRIBUTE(radeon_ttm_page_pool); static int radeon_ttm_vram_open(struct inode *inode, struct file *filep) @@ -930,15 +906,15 @@ static void radeon_ttm_debugfs_init(struct radeon_device *rdev) debugfs_create_file("radeon_vram", 0444, root, rdev, &radeon_ttm_vram_fops); - debugfs_create_file("radeon_gtt", 0444, root, rdev, &radeon_ttm_gtt_fops); - - debugfs_create_file("radeon_vram_mm", 0444, root, rdev, - &radeon_mm_vram_dump_table_fops); - debugfs_create_file("radeon_gtt_mm", 0444, root, rdev, - &radeon_mm_gtt_dump_table_fops); debugfs_create_file("ttm_page_pool", 0444, root, rdev, &radeon_ttm_page_pool_fops); + ttm_resource_manager_create_debugfs(ttm_manager_type(&rdev->mman.bdev, + TTM_PL_VRAM), + root, "radeon_vram_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&rdev->mman.bdev, + TTM_PL_TT), + root, "radeon_gtt_mm"); #endif } From e0f74ed4634d6d662e7dca19115d0da1143a3ec0 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 7 Apr 2022 03:19:43 -0400 Subject: [PATCH 139/219] i915/gvt: Separate the MMIO tracking table from GVT-g To support the new mdev interfaces and the re-factor patches from Christoph, which moves the GVT-g code into a dedicated module, the GVT-g MMIO tracking table needs to be separated from GVT-g. v9: - Fix a problem might cause kernel panic. - Remove the redaundant definitation of intel_get_device_type(). 
(Jani) - Sort the list of header reference in intel_gvt_mmio.c (Jani) - Include minimum header insted in intel_gvt_mmio.c (Jani) v8: - Use SPDX header in the intel_gvt_mmio_table.c - Reference the gvt.h with path. (Jani) - Add a missing fix on mmio emulation path during the debug. - Fix a building problem on refreshed gvt-staging branch. (Christoph) v7: - Keep the marcos of device generation in GVT-g. (Christoph, Jani) v6: - Move the mmio_table.c into i915. (Christoph) - Keep init_device_info and related structures in GVT-g. (Christoph) - Refine the callbacks of the iterator. (Christoph) - Move the flags of MMIO register defination to GVT-g. (Chrsitoph) - Move the mmio block handling to GVT-g. v5: - Re-design the mmio table framework. (Christoph) v4: - Fix the errors of patch checking scripts. v3: - Fix the errors when CONFIG_DRM_I915_WERROR is turned on. (Jani) v2: - Implement a mmio table instead of generating it by marco in i915. (Jani) Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Vivi Rodrigo Cc: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhi Wang Reviewed-by: Christoph Hellwig Tested-by: Christoph Hellwig Reviewed-by: Zhenyu Wang Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20220407071945.72148-2-zhi.a.wang@intel.com --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/gvt/gvt.h | 3 +- drivers/gpu/drm/i915/gvt/handlers.c | 1033 ++------------- drivers/gpu/drm/i915/gvt/mmio.h | 1 - drivers/gpu/drm/i915/gvt/reg.h | 9 +- drivers/gpu/drm/i915/intel_gvt.h | 19 + drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 1290 +++++++++++++++++++ 7 files changed, 1458 insertions(+), 899 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_gvt_mmio_table.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 7df74a71d454..a6cab2d9a9d1 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -321,7 +321,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ i915-y += i915_vgpu.o ifeq ($(CONFIG_DRM_I915_GVT),y) -i915-y += intel_gvt.o +i915-y += intel_gvt.o intel_gvt_mmio_table.o include $(src)/gvt/Makefile endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0ebffc327528..bfe07c69cfd2 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -36,6 +36,7 @@ #include #include "i915_drv.h" +#include "intel_gvt.h" #include "debug.h" #include "hypercall.h" @@ -272,7 +273,7 @@ struct intel_gvt_mmio { /* Value of command write of this reg needs to be patched */ #define F_CMD_WRITE_PATCH (1 << 8) - const struct gvt_mmio_block *mmio_block; + struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 520a7e1942f3..b7bab95da3c5 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -101,12 +101,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, return NULL; } -static int new_mmio_info(struct intel_gvt *gvt, - u32 offset, u16 flags, u32 size, - u32 addr_mask, u32 ro_mask, u32 device, - gvt_mmio_func read, gvt_mmio_func write) +static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size, + u16 flags, u32 addr_mask, u32 ro_mask, u32 device, + gvt_mmio_func read, gvt_mmio_func write) { - struct intel_gvt_mmio_info *info, *p; + struct intel_gvt_mmio_info *p; u32 start, end, i; if (!intel_gvt_match_device(gvt, 
device)) @@ -119,32 +118,18 @@ static int new_mmio_info(struct intel_gvt *gvt, end = offset + size; for (i = start; i < end; i += 4) { - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; - - info->offset = i; - p = intel_gvt_find_mmio_info(gvt, info->offset); - if (p) { - WARN(1, "dup mmio definition offset %x\n", - info->offset); - kfree(info); - - /* We return -EEXIST here to make GVT-g load fail. - * So duplicated MMIO can be found as soon as - * possible. - */ - return -EEXIST; + p = intel_gvt_find_mmio_info(gvt, i); + if (!p) { + WARN(1, "assign a handler to a non-tracked mmio %x\n", + i); + return -ENODEV; } - - info->ro_mask = ro_mask; - info->device = device; - info->read = read ? read : intel_vgpu_default_mmio_read; - info->write = write ? write : intel_vgpu_default_mmio_write; - gvt->mmio.mmio_attribute[info->offset / 4] = flags; - INIT_HLIST_NODE(&info->node); - hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); - gvt->mmio.num_tracked_mmio++; + p->ro_mask = ro_mask; + gvt->mmio.mmio_attribute[i / 4] = flags; + if (read) + p->read = read; + if (write) + p->write = write; } return 0; } @@ -2137,15 +2122,12 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, } #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \ - ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \ - f, s, am, rm, d, r, w); \ + ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \ + s, f, am, rm, d, r, w); \ if (ret) \ return ret; \ } while (0) -#define MMIO_D(reg, d) \ - MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL) - #define MMIO_DH(reg, d, r, w) \ MMIO_F(reg, 4, 0, 0, 0, d, r, w) @@ -2170,9 +2152,6 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) -#define MMIO_RING_D(prefix, d) \ - MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL) - #define MMIO_RING_DFH(prefix, d, f, r, w) \ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w) @@ -2196,7 +2175,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(SDEISR, D_ALL); MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL); @@ -2224,7 +2202,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); - MMIO_D(GEN7_CXT_SIZE, D_ALL); MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL); MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL); @@ -2278,257 +2255,32 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ - MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_D(_MMIO(0x602a0), D_ALL); - - MMIO_D(_MMIO(0x65050), D_ALL); - MMIO_D(_MMIO(0x650b4), D_ALL); - - MMIO_D(_MMIO(0xc4040), D_ALL); - MMIO_D(DERRMR, D_ALL); - - MMIO_D(PIPEDSL(PIPE_A), D_ALL); - MMIO_D(PIPEDSL(PIPE_B), D_ALL); - MMIO_D(PIPEDSL(PIPE_C), D_ALL); - MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); - MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write); - - MMIO_D(PIPESTAT(PIPE_A), D_ALL); - MMIO_D(PIPESTAT(PIPE_B), D_ALL); - MMIO_D(PIPESTAT(PIPE_C), D_ALL); - 
MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL); - - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL); - - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL); - - MMIO_D(CURCNTR(PIPE_A), D_ALL); - MMIO_D(CURCNTR(PIPE_B), D_ALL); - MMIO_D(CURCNTR(PIPE_C), D_ALL); - - MMIO_D(CURPOS(PIPE_A), D_ALL); - MMIO_D(CURPOS(PIPE_B), D_ALL); - MMIO_D(CURPOS(PIPE_C), D_ALL); - - MMIO_D(CURBASE(PIPE_A), D_ALL); - MMIO_D(CURBASE(PIPE_B), D_ALL); - MMIO_D(CURBASE(PIPE_C), D_ALL); - - MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL); - MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL); - MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL); - - MMIO_D(_MMIO(0x700ac), D_ALL); - MMIO_D(_MMIO(0x710ac), D_ALL); - MMIO_D(_MMIO(0x720ac), D_ALL); - - MMIO_D(_MMIO(0x70090), D_ALL); - MMIO_D(_MMIO(0x70094), D_ALL); - MMIO_D(_MMIO(0x70098), D_ALL); - MMIO_D(_MMIO(0x7009c), D_ALL); - - MMIO_D(DSPCNTR(PIPE_A), D_ALL); - MMIO_D(DSPADDR(PIPE_A), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); - MMIO_D(DSPPOS(PIPE_A), D_ALL); - MMIO_D(DSPSIZE(PIPE_A), D_ALL); MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(DSPCNTR(PIPE_B), D_ALL); - MMIO_D(DSPADDR(PIPE_B), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); - MMIO_D(DSPPOS(PIPE_B), D_ALL); - MMIO_D(DSPSIZE(PIPE_B), D_ALL); MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(DSPCNTR(PIPE_C), D_ALL); - MMIO_D(DSPADDR(PIPE_C), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); - MMIO_D(DSPPOS(PIPE_C), D_ALL); - MMIO_D(DSPSIZE(PIPE_C), D_ALL); MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_A), D_ALL); - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); - MMIO_D(SPRPOS(PIPE_A), D_ALL); - MMIO_D(SPRSIZE(PIPE_A), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); - MMIO_D(SPROFFSET(PIPE_A), D_ALL); - MMIO_D(SPRSCALE(PIPE_A), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_B), D_ALL); - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); - MMIO_D(SPRPOS(PIPE_B), D_ALL); - MMIO_D(SPRSIZE(PIPE_B), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); - MMIO_D(SPROFFSET(PIPE_B), D_ALL); - MMIO_D(SPRSCALE(PIPE_B), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_C), D_ALL); - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); - MMIO_D(SPRPOS(PIPE_C), D_ALL); - MMIO_D(SPRSIZE(PIPE_C), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, 
spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); - MMIO_D(SPROFFSET(PIPE_C), D_ALL); - MMIO_D(SPRSCALE(PIPE_C), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); - MMIO_D(HBLANK(TRANSCODER_A), D_ALL); - MMIO_D(HSYNC(TRANSCODER_A), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_A), D_ALL); - MMIO_D(VBLANK(TRANSCODER_A), D_ALL); - MMIO_D(VSYNC(TRANSCODER_A), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_A), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_B), D_ALL); - MMIO_D(HBLANK(TRANSCODER_B), D_ALL); - MMIO_D(HSYNC(TRANSCODER_B), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_B), D_ALL); - MMIO_D(VBLANK(TRANSCODER_B), D_ALL); - MMIO_D(VSYNC(TRANSCODER_B), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_B), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_C), D_ALL); - MMIO_D(HBLANK(TRANSCODER_C), D_ALL); - MMIO_D(HSYNC(TRANSCODER_C), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_C), D_ALL); - MMIO_D(VBLANK(TRANSCODER_C), D_ALL); - MMIO_D(VSYNC(TRANSCODER_C), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_C), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL); - MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL); - MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL); - MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL); - MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL); - - MMIO_D(PF_CTL(PIPE_A), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_A), D_ALL); - MMIO_D(PF_VSCALE(PIPE_A), D_ALL); - MMIO_D(PF_HSCALE(PIPE_A), D_ALL); - - MMIO_D(PF_CTL(PIPE_B), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_B), D_ALL); - MMIO_D(PF_VSCALE(PIPE_B), D_ALL); - MMIO_D(PF_HSCALE(PIPE_B), D_ALL); - - MMIO_D(PF_CTL(PIPE_C), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL); - 
MMIO_D(PF_WIN_POS(PIPE_C), D_ALL); - MMIO_D(PF_VSCALE(PIPE_C), D_ALL); - MMIO_D(PF_HSCALE(PIPE_C), D_ALL); - - MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL); - MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL); - MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL); - MMIO_D(WM1_LP_ILK, D_ALL); - MMIO_D(WM2_LP_ILK, D_ALL); - MMIO_D(WM3_LP_ILK, D_ALL); - MMIO_D(WM1S_LP_ILK, D_ALL); - MMIO_D(WM2S_LP_IVB, D_ALL); - MMIO_D(WM3S_LP_IVB, D_ALL); - - MMIO_D(BLC_PWM_CPU_CTL2, D_ALL); - MMIO_D(BLC_PWM_CPU_CTL, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL1, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL2, D_ALL); - - MMIO_D(_MMIO(0x48268), D_ALL); - MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read, gmbus_mmio_write); MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, dp_aux_ch_ctl_mmio_write); @@ -2551,74 +2303,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status); - - MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL); - - MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL); - - MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL); - - MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL); - MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL); - MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL); - - MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL); - MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL); - MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL); - MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write); - MMIO_D(PCH_PP_DIVISOR, D_ALL); - MMIO_D(PCH_PP_STATUS, D_ALL); - MMIO_D(PCH_LVDS, D_ALL); - MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL); - MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL); - MMIO_D(_MMIO(_PCH_FPA0), D_ALL); - MMIO_D(_MMIO(_PCH_FPA1), D_ALL); - MMIO_D(_MMIO(_PCH_FPB0), D_ALL); - MMIO_D(_MMIO(_PCH_FPB1), D_ALL); - MMIO_D(PCH_DREF_CONTROL, D_ALL); - MMIO_D(PCH_RAWCLK_FREQ, D_ALL); - MMIO_D(PCH_DPLL_SEL, D_ALL); - - MMIO_D(_MMIO(0x61208), D_ALL); - MMIO_D(_MMIO(0x6120c), D_ALL); - MMIO_D(PCH_PP_ON_DELAYS, D_ALL); - MMIO_D(PCH_PP_OFF_DELAYS, D_ALL); - MMIO_DH(_MMIO(0xe651c), D_ALL, 
dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL); @@ -2634,143 +2319,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write); - MMIO_D(FUSE_STRAP, D_ALL); - MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL); - - MMIO_D(DISP_ARB_CTL, D_ALL); - MMIO_D(DISP_ARB_CTL2, D_ALL); - - MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL); - MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL); - MMIO_D(ILK_DSPCLK_GATE_D, D_ALL); - - MMIO_D(SOUTH_CHICKEN1, D_ALL); MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write); - MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL); - MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL); - MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL); - MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL); - MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL); - - MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_FBC_RT_BASE, D_ALL); - - MMIO_D(IPS_CTL, D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL); - - MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(_MMIO(0x60110), D_ALL); - MMIO_D(_MMIO(0x61110), D_ALL); - MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, 
NULL, NULL); - MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - - MMIO_D(WM_LINETIME(PIPE_A), D_ALL); - MMIO_D(WM_LINETIME(PIPE_B), D_ALL); - MMIO_D(WM_LINETIME(PIPE_C), D_ALL); - MMIO_D(SPLL_CTL, D_ALL); - MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL); - MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL); - - MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL); - MMIO_D(_MMIO(0x46508), D_ALL); - - MMIO_D(_MMIO(0x49080), D_ALL); - MMIO_D(_MMIO(0x49180), D_ALL); - MMIO_D(_MMIO(0x49280), D_ALL); - - MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(GAMMA_MODE(PIPE_A), D_ALL); - MMIO_D(GAMMA_MODE(PIPE_B), D_ALL); - MMIO_D(GAMMA_MODE(PIPE_C), D_ALL); - - MMIO_D(PIPE_MULT(PIPE_A), D_ALL); - MMIO_D(PIPE_MULT(PIPE_B), D_ALL); - MMIO_D(PIPE_MULT(PIPE_C), D_ALL); - - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL); - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL); - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL); - MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL); - MMIO_D(SBI_ADDR, D_ALL); MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL); MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write); - MMIO_D(PIXCLK_GATE, D_ALL); MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write); @@ -2793,65 +2345,18 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write); MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL); - MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL); - MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL); - MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL); - MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL); - MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL); - MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL); - MMIO_D(FORCEWAKE_ACK, D_ALL); - MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); - MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL); - MMIO_D(ECOBUS, D_ALL); MMIO_DH(GEN6_RC_CONTROL, 
D_ALL, NULL, NULL); MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL); - MMIO_D(GEN6_RPNSWREQ, D_ALL); - MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL); - MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL); - MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL); - MMIO_D(GEN6_RPSTAT1, D_ALL); - MMIO_D(GEN6_RP_CONTROL, D_ALL); - MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL); - MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL); - MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL); - MMIO_D(GEN6_RP_CUR_UP, D_ALL); - MMIO_D(GEN6_RP_PREV_UP, D_ALL); - MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL); - MMIO_D(GEN6_RP_CUR_DOWN, D_ALL); - MMIO_D(GEN6_RP_PREV_DOWN, D_ALL); - MMIO_D(GEN6_RP_UP_EI, D_ALL); - MMIO_D(GEN6_RP_DOWN_EI, D_ALL); - MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL); - MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL); - MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL); - MMIO_D(GEN6_RC_SLEEP, D_ALL); - MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL); - MMIO_D(GEN6_PMINTRMSK, D_ALL); MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write); @@ -2859,97 +2364,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write); - MMIO_D(RSTDBYCTL, D_ALL); - MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write); MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write); MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write); - MMIO_D(TILECTL, D_ALL); - - MMIO_D(GEN6_UCGCTL1, D_ALL); - MMIO_D(GEN6_UCGCTL2, D_ALL); - - MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(GEN6_PCODE_DATA, D_ALL); - MMIO_D(_MMIO(0x13812c), D_ALL); MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); - MMIO_D(HSW_EDRAM_CAP, D_ALL); - MMIO_D(HSW_IDICR, D_ALL); MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL); - MMIO_D(_MMIO(0x3c), D_ALL); - MMIO_D(_MMIO(0x860), D_ALL); - MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL); - MMIO_D(_MMIO(0x121d0), D_ALL); - MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL); - MMIO_D(_MMIO(0x41d0), D_ALL); - MMIO_D(GAC_ECO_BITS, D_ALL); - MMIO_D(_MMIO(0x6200), D_ALL); - MMIO_D(_MMIO(0x6204), D_ALL); - MMIO_D(_MMIO(0x6208), D_ALL); - MMIO_D(_MMIO(0x7118), D_ALL); - MMIO_D(_MMIO(0x7180), D_ALL); - MMIO_D(_MMIO(0x7408), D_ALL); - MMIO_D(_MMIO(0x7c00), D_ALL); MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write); - MMIO_D(_MMIO(0x911c), D_ALL); - MMIO_D(_MMIO(0x9120), D_ALL); MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAB_CTL, D_ALL); - MMIO_D(_MMIO(0x48800), D_ALL); - MMIO_D(_MMIO(0xce044), D_ALL); - MMIO_D(_MMIO(0xe6500), D_ALL); - MMIO_D(_MMIO(0xe6504), D_ALL); - MMIO_D(_MMIO(0xe6600), D_ALL); - MMIO_D(_MMIO(0xe6604), D_ALL); - MMIO_D(_MMIO(0xe6700), D_ALL); - MMIO_D(_MMIO(0xe6704), D_ALL); - MMIO_D(_MMIO(0xe6800), D_ALL); - MMIO_D(_MMIO(0xe6804), D_ALL); - MMIO_D(PCH_GMBUS4, D_ALL); - MMIO_D(PCH_GMBUS5, D_ALL); - - MMIO_D(_MMIO(0x902c), D_ALL); - MMIO_D(_MMIO(0xec008), D_ALL); - MMIO_D(_MMIO(0xec00c), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL); - 
MMIO_D(_MMIO(0xec408), D_ALL); - MMIO_D(_MMIO(0xec40c), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xfc810), D_ALL); - MMIO_D(_MMIO(0xfc81c), D_ALL); - MMIO_D(_MMIO(0xfc828), D_ALL); - MMIO_D(_MMIO(0xfc834), D_ALL); - MMIO_D(_MMIO(0xfcc00), D_ALL); - MMIO_D(_MMIO(0xfcc0c), D_ALL); - MMIO_D(_MMIO(0xfcc18), D_ALL); - MMIO_D(_MMIO(0xfcc24), D_ALL); - MMIO_D(_MMIO(0xfd000), D_ALL); - MMIO_D(_MMIO(0xfd00c), D_ALL); - MMIO_D(_MMIO(0xfd018), D_ALL); - MMIO_D(_MMIO(0xfd024), D_ALL); - MMIO_D(_MMIO(0xfd034), D_ALL); - MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write); - MMIO_D(_MMIO(0x2054), D_ALL); - MMIO_D(_MMIO(0x12054), D_ALL); - MMIO_D(_MMIO(0x22054), D_ALL); - MMIO_D(_MMIO(0x1a054), D_ALL); - - MMIO_D(_MMIO(0x44070), D_ALL); MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL); @@ -2957,8 +2382,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); - MMIO_D(_MMIO(0x2b00), D_BDW_PLUS); - MMIO_D(_MMIO(0x2360), D_BDW_PLUS); MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); @@ -3006,28 +2429,23 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) static int init_bdw_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3035,7 +2453,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3043,7 +2460,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3051,22 
+2467,18 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS); MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS); MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS); MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS); MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, intel_vgpu_reg_master_irq_handler); @@ -3101,21 +2513,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); #undef RING_REG - MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS); - MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS); - MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS); - MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS); - MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS); - MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); - MMIO_D(_MMIO(0x1c054), D_BDW_PLUS); - MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); - MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); - - MMIO_D(GAMTARBMODE, D_BDW_PLUS); - #define RING_REG(base) _MMIO((base) + 0x270) MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG @@ -3124,24 +2523,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS); - MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS); - MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); - - MMIO_D(WM_MISC, D_BDW); - MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); - - MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); - MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); - MMIO_D(_MMIO(0x66c04), D_BDW_PLUS); - - MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS); - - MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS); - MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); - MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); - - MMIO_D(_MMIO(0xfdc), D_BDW_PLUS); MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3153,27 +2534,14 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0xb110), D_BDW); - MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS); MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0, D_BDW_PLUS, NULL, force_nonpriv_write); - MMIO_D(_MMIO(0x44484), D_BDW_PLUS); - MMIO_D(_MMIO(0x4448c), D_BDW_PLUS); - MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0x110000), D_BDW_PLUS); - - MMIO_D(_MMIO(0x48400), D_BDW_PLUS); - - MMIO_D(_MMIO(0x6e570), D_BDW_PLUS); - MMIO_D(_MMIO(0x65f10), D_BDW_PLUS); - MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, 
NULL); MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); @@ -3213,30 +2581,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); - MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS); - MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); - MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); - MMIO_D(DC_STATE_EN, D_SKL_PLUS); - MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS); - MMIO_D(CDCLK_CTL, D_SKL_PLUS); MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); - MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS); - MMIO_D(DPLL_CTRL1, D_SKL_PLUS); - MMIO_D(DPLL_CTRL2, D_SKL_PLUS); MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); @@ -3279,22 +2632,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); @@ -3356,30 +2693,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS); - MMIO_D(_MMIO(0x72380), D_SKL_PLUS); - MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS); - - MMIO_D(DMC_SSP_BASE, D_SKL_PLUS); - MMIO_D(DMC_HTP_SKL, D_SKL_PLUS); - MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS); - MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(SKL_DFSM, D_SKL_PLUS); - MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS); - MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, 
NULL); MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_D(RPM_CONFIG0, D_SKL_PLUS); - MMIO_D(_MMIO(0xd08), D_SKL_PLUS); - MMIO_D(RC6_LOCATION, D_SKL_PLUS); MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3396,40 +2716,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, NULL, gen9_trtt_chicken_write); - MMIO_D(_MMIO(0x46430), D_SKL_PLUS); - - MMIO_D(_MMIO(0x46520), D_SKL_PLUS); - - MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); - MMIO_D(_MMIO(0x65900), D_SKL_PLUS); - MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS); - MMIO_D(_MMIO(0x4068), D_SKL_PLUS); - MMIO_D(_MMIO(0x67054), D_SKL_PLUS); - MMIO_D(_MMIO(0x6e560), D_SKL_PLUS); - MMIO_D(_MMIO(0x6e554), D_SKL_PLUS); - MMIO_D(_MMIO(0x2b20), D_SKL_PLUS); - MMIO_D(_MMIO(0x65f00), D_SKL_PLUS); - MMIO_D(_MMIO(0x65f08), D_SKL_PLUS); - MMIO_D(_MMIO(0x320f0), D_SKL_PLUS); - - MMIO_D(_MMIO(0x70034), D_SKL_PLUS); - MMIO_D(_MMIO(0x71034), D_SKL_PLUS); - MMIO_D(_MMIO(0x72034), D_SKL_PLUS); - - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); - - MMIO_D(_MMIO(0x44500), D_SKL_PLUS); #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, csfe_chicken1_mmio_write); @@ -3440,7 +2729,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; @@ -3448,43 +2736,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) static int init_bxt_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; - MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); - - MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT); - MMIO_D(GEN7_ROW_INSTDONE, D_BXT); - MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT); - MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT); - MMIO_D(ERROR_GEN6, D_BXT); - MMIO_D(DONE_REG, D_BXT); - MMIO_D(EIR, D_BXT); - MMIO_D(PGTBL_ER, D_BXT); - MMIO_D(_MMIO(0x4194), D_BXT); - MMIO_D(_MMIO(0x4294), D_BXT); - MMIO_D(_MMIO(0x4494), D_BXT); - - MMIO_RING_D(RING_PSMI_CTL, D_BXT); - MMIO_RING_D(RING_DMA_FADD, D_BXT); - MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT); - MMIO_RING_D(RING_IPEHR, D_BXT); - MMIO_RING_D(RING_INSTPS, D_BXT); - MMIO_RING_D(RING_BBADDR_UDW, D_BXT); - MMIO_RING_D(RING_BBSTATE, D_BXT); - MMIO_RING_D(RING_IPEIR, D_BXT); - - MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL); - MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); - MMIO_D(BXT_RP_STATE_CAP, D_BXT); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, NULL, bxt_phy_ctl_family_write); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, NULL, bxt_phy_ctl_family_write); - MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); - MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); - MMIO_D(BXT_PHY_CTL(PORT_C), 
D_BXT); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, NULL, bxt_port_pll_enable_write); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, @@ -3492,128 +2750,19 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, bxt_port_pll_enable_write); - MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT); - - MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, 
DPIO_CH1, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT); - - MMIO_D(BXT_DE_PLL_CTL, D_BXT); MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); - MMIO_D(BXT_DSI_PLL_CTL, D_BXT); - MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); - - MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); - MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT); - - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); - - MMIO_D(RC6_CTX_BASE, D_BXT); - - MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); - MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); - MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); - MMIO_D(GEN6_GFXPAUSE, D_BXT); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); @@ -3633,17 +2782,14 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) return 0; } -static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, - unsigned int offset) +static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, + unsigned int offset) { - unsigned long device = intel_gvt_get_device_type(gvt); - const struct gvt_mmio_block *block = gvt->mmio.mmio_block; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; int num = gvt->mmio.num_mmio_block; int i; for (i = 0; i < num; i++, block++) { - if (!(device & block->device)) - continue; if (offset >= 
i915_mmio_reg_offset(block->offset) && offset < i915_mmio_reg_offset(block->offset) + block->size) return block; @@ -3668,23 +2814,117 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node) kfree(e); + kfree(gvt->mmio.mmio_block); + gvt->mmio.mmio_block = NULL; + gvt->mmio.num_mmio_block = 0; + vfree(gvt->mmio.mmio_attribute); gvt->mmio.mmio_attribute = NULL; } -/* Special MMIO blocks. registers in MMIO block ranges should not be command - * accessible (should have no F_CMD_ACCESS flag). - * otherwise, need to update cmd_reg_handler in cmd_parser.c - */ -static const struct gvt_mmio_block mmio_blocks[] = { - {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL}, - {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, - {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, - pvinfo_mmio_read, pvinfo_mmio_write}, - {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, -}; +static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, + u32 size) +{ + struct intel_gvt *gvt = iter->data; + struct intel_gvt_mmio_info *info, *p; + u32 start, end, i; + + if (WARN_ON(!IS_ALIGNED(offset, 4))) + return -EINVAL; + + start = offset; + end = offset + size; + + for (i = start; i < end; i += 4) { + p = intel_gvt_find_mmio_info(gvt, i); + if (p) { + WARN(1, "dup mmio definition offset %x\n", + info->offset); + + /* We return -EEXIST here to make GVT-g load fail. + * So duplicated MMIO can be found as soon as + * possible. + */ + return -EEXIST; + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->offset = i; + info->read = intel_vgpu_default_mmio_read; + info->write = intel_vgpu_default_mmio_write; + INIT_HLIST_NODE(&info->node); + hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); + gvt->mmio.num_tracked_mmio++; + } + return 0; +} + +static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size) +{ + struct intel_gvt *gvt = iter->data; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; + void *ret; + + ret = krealloc(block, + (gvt->mmio.num_mmio_block + 1) * sizeof(*block), + GFP_KERNEL); + if (!ret) + return -ENOMEM; + + gvt->mmio.mmio_block = block = ret; + + block += gvt->mmio.num_mmio_block; + + memset(block, 0, sizeof(*block)); + + block->offset = _MMIO(offset); + block->size = size; + + gvt->mmio.num_mmio_block++; + + return 0; +} + +static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset, + u32 size) +{ + if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0))) + return handle_mmio(iter, offset, size); + else + return handle_mmio_block(iter, offset, size); +} + +static int init_mmio_info(struct intel_gvt *gvt) +{ + struct intel_gvt_mmio_table_iter iter = { + .i915 = gvt->gt->i915, + .data = gvt, + .handle_mmio_cb = handle_mmio_cb, + }; + + return intel_gvt_iterate_mmio_table(&iter); +} + +static int init_mmio_block_handlers(struct intel_gvt *gvt) +{ + struct gvt_mmio_block *block; + + block = find_mmio_block(gvt, VGT_PVINFO_PAGE); + if (!block) { + WARN(1, "fail to assign handlers to mmio block %x\n", + i915_mmio_reg_offset(block->offset)); + return -ENODEV; + } + + block->read = pvinfo_mmio_read; + block->write = pvinfo_mmio_write; + + return 0; +} /** * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device @@ -3707,6 +2947,14 @@ int intel_gvt_setup_mmio_info(struct 
intel_gvt *gvt) if (!gvt->mmio.mmio_attribute) return -ENOMEM; + ret = init_mmio_info(gvt); + if (ret) + goto err; + + ret = init_mmio_block_handlers(gvt); + if (ret) + goto err; + ret = init_generic_mmio_info(gvt); if (ret) goto err; @@ -3737,9 +2985,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) goto err; } - gvt->mmio.mmio_block = mmio_blocks; - gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks); - return 0; err: intel_gvt_clean_mmio_info(gvt); @@ -3759,7 +3004,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data) { - const struct gvt_mmio_block *block = gvt->mmio.mmio_block; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; struct intel_gvt_mmio_info *e; int i, j, ret; @@ -3775,9 +3020,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, continue; for (j = 0; j < block->size; j += 4) { - ret = handler(gvt, - i915_mmio_reg_offset(block->offset) + j, - data); + ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data); if (ret) return ret; } @@ -3877,7 +3120,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio_info; - const struct gvt_mmio_block *mmio_block; + struct gvt_mmio_block *mmio_block; gvt_mmio_func func; int ret; diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 7c26af39fbfc..bba154e38705 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -72,7 +72,6 @@ struct intel_gvt_mmio_info { const struct intel_engine_cs * intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg); unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); -bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); int intel_gvt_setup_mmio_info(struct intel_gvt *gvt); void intel_gvt_clean_mmio_info(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 7d666d34f9ff..d8216c63c39a 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -132,6 +132,13 @@ #define RING_GFX_MODE(base) _MMIO((base) + 0x29c) #define VF_GUARDBAND _MMIO(0x83a4) - #define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4) + +/* XXX FIXME i915 has changed PP_XXX definition */ +#define PCH_PP_STATUS _MMIO(0xc7200) +#define PCH_PP_CONTROL _MMIO(0xc7204) +#define PCH_PP_ON_DELAYS _MMIO(0xc7208) +#define PCH_PP_OFF_DELAYS _MMIO(0xc720c) +#define PCH_PP_DIVISOR _MMIO(0xc7210) + #endif diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index d7d3fb6186fd..3a2563ad50f8 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -24,9 +24,19 @@ #ifndef _INTEL_GVT_H_ #define _INTEL_GVT_H_ +#include + struct drm_i915_private; #ifdef CONFIG_DRM_I915_GVT + +struct intel_gvt_mmio_table_iter { + struct drm_i915_private *i915; + void *data; + int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size); +}; + int intel_gvt_init(struct drm_i915_private *dev_priv); void intel_gvt_driver_remove(struct drm_i915_private *dev_priv); int intel_gvt_init_device(struct drm_i915_private *dev_priv); @@ -34,6 +44,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv); void intel_gvt_resume(struct drm_i915_private *dev_priv); +int 
intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter); #else static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { @@ -51,6 +62,14 @@ static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) { } + +struct intel_gvt_mmio_table_iter { +}; + +static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) +{ + return 0; +} #endif #endif /* _INTEL_GVT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c new file mode 100644 index 000000000000..7f7fa9a6a7be --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -0,0 +1,1290 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "display/vlv_dsi_pll_regs.h" +#include "gt/intel_gt_regs.h" +#include "gvt/gvt.h" +#include "i915_drv.h" +#include "i915_pvinfo.h" +#include "i915_reg.h" +#include "intel_gvt.h" +#include "intel_mchbar_regs.h" + +#define MMIO_F(reg, s) do { \ + int ret; \ + ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), s); \ + if (ret) \ + return ret; \ +} while (0) + +#define MMIO_D(reg) MMIO_F(reg, 4) + +#define MMIO_RING_F(prefix, s) do { \ + MMIO_F(prefix(RENDER_RING_BASE), s); \ + MMIO_F(prefix(BLT_RING_BASE), s); \ + MMIO_F(prefix(GEN6_BSD_RING_BASE), s); \ + MMIO_F(prefix(VEBOX_RING_BASE), s); \ + if (HAS_ENGINE(to_gt(iter->i915), VCS1)) \ + MMIO_F(prefix(GEN8_BSD2_RING_BASE), s); \ +} while (0) + +#define MMIO_RING_D(prefix) \ + MMIO_RING_F(prefix, 4) + +static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_RING_D(RING_IMR); + MMIO_D(SDEIMR); + MMIO_D(SDEIER); + MMIO_D(SDEIIR); + MMIO_D(SDEISR); + MMIO_RING_D(RING_HWSTAM); + MMIO_D(BSD_HWS_PGA_GEN7); + MMIO_D(BLT_HWS_PGA_GEN7); + MMIO_D(VEBOX_HWS_PGA_GEN7); + +#define RING_REG(base) _MMIO((base) + 0x28) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x134) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x6c) + MMIO_RING_D(RING_REG); +#undef RING_REG + MMIO_D(_MMIO(0x2148)); + MMIO_D(CCID(RENDER_RING_BASE)); + MMIO_D(_MMIO(0x12198)); + MMIO_D(GEN7_CXT_SIZE); + MMIO_RING_D(RING_TAIL); + MMIO_RING_D(RING_HEAD); + MMIO_RING_D(RING_CTL); + MMIO_RING_D(RING_ACTHD); + MMIO_RING_D(RING_START); + + /* RING MODE */ +#define RING_REG(base) _MMIO((base) + 0x29c) + MMIO_RING_D(RING_REG); +#undef RING_REG + + MMIO_RING_D(RING_MI_MODE); + MMIO_RING_D(RING_INSTPM); + MMIO_RING_D(RING_TIMESTAMP); + MMIO_RING_D(RING_TIMESTAMP_UDW); + MMIO_D(GEN7_GT_MODE); + MMIO_D(CACHE_MODE_0_GEN7); + MMIO_D(CACHE_MODE_1); + MMIO_D(CACHE_MODE_0); + MMIO_D(_MMIO(0x2124)); + MMIO_D(_MMIO(0x20dc)); + MMIO_D(_3D_CHICKEN3); + MMIO_D(_MMIO(0x2088)); + MMIO_D(FF_SLICE_CS_CHICKEN2); + MMIO_D(_MMIO(0x2470)); + MMIO_D(GAM_ECOCHK); + MMIO_D(GEN7_COMMON_SLICE_CHICKEN1); + MMIO_D(COMMON_SLICE_CHICKEN2); + MMIO_D(_MMIO(0x9030)); + MMIO_D(_MMIO(0x20a0)); + MMIO_D(_MMIO(0x2420)); + MMIO_D(_MMIO(0x2430)); + MMIO_D(_MMIO(0x2434)); + MMIO_D(_MMIO(0x2438)); + MMIO_D(_MMIO(0x243c)); + MMIO_D(_MMIO(0x7018)); + MMIO_D(HALF_SLICE_CHICKEN3); + MMIO_D(GEN7_HALF_SLICE_CHICKEN1); + /* display */ + MMIO_F(_MMIO(0x60220), 0x20); + MMIO_D(_MMIO(0x602a0)); + MMIO_D(_MMIO(0x65050)); + MMIO_D(_MMIO(0x650b4)); + MMIO_D(_MMIO(0xc4040)); + MMIO_D(DERRMR); + MMIO_D(PIPEDSL(PIPE_A)); + MMIO_D(PIPEDSL(PIPE_B)); + 
MMIO_D(PIPEDSL(PIPE_C)); + MMIO_D(PIPEDSL(_PIPE_EDP)); + MMIO_D(PIPECONF(PIPE_A)); + MMIO_D(PIPECONF(PIPE_B)); + MMIO_D(PIPECONF(PIPE_C)); + MMIO_D(PIPECONF(_PIPE_EDP)); + MMIO_D(PIPESTAT(PIPE_A)); + MMIO_D(PIPESTAT(PIPE_B)); + MMIO_D(PIPESTAT(PIPE_C)); + MMIO_D(PIPESTAT(_PIPE_EDP)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C)); + MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C)); + MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP)); + MMIO_D(CURCNTR(PIPE_A)); + MMIO_D(CURCNTR(PIPE_B)); + MMIO_D(CURCNTR(PIPE_C)); + MMIO_D(CURPOS(PIPE_A)); + MMIO_D(CURPOS(PIPE_B)); + MMIO_D(CURPOS(PIPE_C)); + MMIO_D(CURBASE(PIPE_A)); + MMIO_D(CURBASE(PIPE_B)); + MMIO_D(CURBASE(PIPE_C)); + MMIO_D(CUR_FBC_CTL(PIPE_A)); + MMIO_D(CUR_FBC_CTL(PIPE_B)); + MMIO_D(CUR_FBC_CTL(PIPE_C)); + MMIO_D(_MMIO(0x700ac)); + MMIO_D(_MMIO(0x710ac)); + MMIO_D(_MMIO(0x720ac)); + MMIO_D(_MMIO(0x70090)); + MMIO_D(_MMIO(0x70094)); + MMIO_D(_MMIO(0x70098)); + MMIO_D(_MMIO(0x7009c)); + MMIO_D(DSPCNTR(PIPE_A)); + MMIO_D(DSPADDR(PIPE_A)); + MMIO_D(DSPSTRIDE(PIPE_A)); + MMIO_D(DSPPOS(PIPE_A)); + MMIO_D(DSPSIZE(PIPE_A)); + MMIO_D(DSPSURF(PIPE_A)); + MMIO_D(DSPOFFSET(PIPE_A)); + MMIO_D(DSPSURFLIVE(PIPE_A)); + MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY)); + MMIO_D(DSPCNTR(PIPE_B)); + MMIO_D(DSPADDR(PIPE_B)); + MMIO_D(DSPSTRIDE(PIPE_B)); + MMIO_D(DSPPOS(PIPE_B)); + MMIO_D(DSPSIZE(PIPE_B)); + MMIO_D(DSPSURF(PIPE_B)); + MMIO_D(DSPOFFSET(PIPE_B)); + MMIO_D(DSPSURFLIVE(PIPE_B)); + MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY)); + MMIO_D(DSPCNTR(PIPE_C)); + MMIO_D(DSPADDR(PIPE_C)); + MMIO_D(DSPSTRIDE(PIPE_C)); + MMIO_D(DSPPOS(PIPE_C)); + MMIO_D(DSPSIZE(PIPE_C)); + MMIO_D(DSPSURF(PIPE_C)); + MMIO_D(DSPOFFSET(PIPE_C)); + MMIO_D(DSPSURFLIVE(PIPE_C)); + MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY)); + MMIO_D(SPRCTL(PIPE_A)); + MMIO_D(SPRLINOFF(PIPE_A)); + MMIO_D(SPRSTRIDE(PIPE_A)); + MMIO_D(SPRPOS(PIPE_A)); + MMIO_D(SPRSIZE(PIPE_A)); + MMIO_D(SPRKEYVAL(PIPE_A)); + MMIO_D(SPRKEYMSK(PIPE_A)); + MMIO_D(SPRSURF(PIPE_A)); + MMIO_D(SPRKEYMAX(PIPE_A)); + MMIO_D(SPROFFSET(PIPE_A)); + MMIO_D(SPRSCALE(PIPE_A)); + MMIO_D(SPRSURFLIVE(PIPE_A)); + MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0)); + MMIO_D(SPRCTL(PIPE_B)); + MMIO_D(SPRLINOFF(PIPE_B)); + MMIO_D(SPRSTRIDE(PIPE_B)); + MMIO_D(SPRPOS(PIPE_B)); + MMIO_D(SPRSIZE(PIPE_B)); + MMIO_D(SPRKEYVAL(PIPE_B)); + MMIO_D(SPRKEYMSK(PIPE_B)); + MMIO_D(SPRSURF(PIPE_B)); + MMIO_D(SPRKEYMAX(PIPE_B)); + MMIO_D(SPROFFSET(PIPE_B)); + MMIO_D(SPRSCALE(PIPE_B)); + MMIO_D(SPRSURFLIVE(PIPE_B)); + MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0)); + MMIO_D(SPRCTL(PIPE_C)); + MMIO_D(SPRLINOFF(PIPE_C)); + MMIO_D(SPRSTRIDE(PIPE_C)); + MMIO_D(SPRPOS(PIPE_C)); + MMIO_D(SPRSIZE(PIPE_C)); + MMIO_D(SPRKEYVAL(PIPE_C)); + MMIO_D(SPRKEYMSK(PIPE_C)); + MMIO_D(SPRSURF(PIPE_C)); + MMIO_D(SPRKEYMAX(PIPE_C)); + MMIO_D(SPROFFSET(PIPE_C)); + MMIO_D(SPRSCALE(PIPE_C)); + MMIO_D(SPRSURFLIVE(PIPE_C)); + MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0)); + MMIO_D(HTOTAL(TRANSCODER_A)); + MMIO_D(HBLANK(TRANSCODER_A)); + MMIO_D(HSYNC(TRANSCODER_A)); + MMIO_D(VTOTAL(TRANSCODER_A)); + MMIO_D(VBLANK(TRANSCODER_A)); + MMIO_D(VSYNC(TRANSCODER_A)); + MMIO_D(BCLRPAT(TRANSCODER_A)); + MMIO_D(VSYNCSHIFT(TRANSCODER_A)); + MMIO_D(PIPESRC(TRANSCODER_A)); + MMIO_D(HTOTAL(TRANSCODER_B)); + MMIO_D(HBLANK(TRANSCODER_B)); + MMIO_D(HSYNC(TRANSCODER_B)); + MMIO_D(VTOTAL(TRANSCODER_B)); + MMIO_D(VBLANK(TRANSCODER_B)); + MMIO_D(VSYNC(TRANSCODER_B)); + 
MMIO_D(BCLRPAT(TRANSCODER_B)); + MMIO_D(VSYNCSHIFT(TRANSCODER_B)); + MMIO_D(PIPESRC(TRANSCODER_B)); + MMIO_D(HTOTAL(TRANSCODER_C)); + MMIO_D(HBLANK(TRANSCODER_C)); + MMIO_D(HSYNC(TRANSCODER_C)); + MMIO_D(VTOTAL(TRANSCODER_C)); + MMIO_D(VBLANK(TRANSCODER_C)); + MMIO_D(VSYNC(TRANSCODER_C)); + MMIO_D(BCLRPAT(TRANSCODER_C)); + MMIO_D(VSYNCSHIFT(TRANSCODER_C)); + MMIO_D(PIPESRC(TRANSCODER_C)); + MMIO_D(HTOTAL(TRANSCODER_EDP)); + MMIO_D(HBLANK(TRANSCODER_EDP)); + MMIO_D(HSYNC(TRANSCODER_EDP)); + MMIO_D(VTOTAL(TRANSCODER_EDP)); + MMIO_D(VBLANK(TRANSCODER_EDP)); + MMIO_D(VSYNC(TRANSCODER_EDP)); + MMIO_D(BCLRPAT(TRANSCODER_EDP)); + MMIO_D(VSYNCSHIFT(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_A)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_A)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_A)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_A)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_A)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_A)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_A)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_A)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_B)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_B)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_B)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_B)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_B)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_B)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_B)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_B)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_C)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_C)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_C)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_C)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_C)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_C)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_C)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_C)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP)); + MMIO_D(PF_CTL(PIPE_A)); + MMIO_D(PF_WIN_SZ(PIPE_A)); + MMIO_D(PF_WIN_POS(PIPE_A)); + MMIO_D(PF_VSCALE(PIPE_A)); + MMIO_D(PF_HSCALE(PIPE_A)); + MMIO_D(PF_CTL(PIPE_B)); + MMIO_D(PF_WIN_SZ(PIPE_B)); + MMIO_D(PF_WIN_POS(PIPE_B)); + MMIO_D(PF_VSCALE(PIPE_B)); + MMIO_D(PF_HSCALE(PIPE_B)); + MMIO_D(PF_CTL(PIPE_C)); + MMIO_D(PF_WIN_SZ(PIPE_C)); + MMIO_D(PF_WIN_POS(PIPE_C)); + MMIO_D(PF_VSCALE(PIPE_C)); + MMIO_D(PF_HSCALE(PIPE_C)); + MMIO_D(WM0_PIPE_ILK(PIPE_A)); + MMIO_D(WM0_PIPE_ILK(PIPE_B)); + MMIO_D(WM0_PIPE_ILK(PIPE_C)); + MMIO_D(WM1_LP_ILK); + MMIO_D(WM2_LP_ILK); + MMIO_D(WM3_LP_ILK); + MMIO_D(WM1S_LP_ILK); + MMIO_D(WM2S_LP_IVB); + MMIO_D(WM3S_LP_IVB); + MMIO_D(BLC_PWM_CPU_CTL2); + MMIO_D(BLC_PWM_CPU_CTL); + MMIO_D(BLC_PWM_PCH_CTL1); + MMIO_D(BLC_PWM_PCH_CTL2); + MMIO_D(_MMIO(0x48268)); + MMIO_F(PCH_GMBUS0, 4 * 4); + MMIO_F(PCH_GPIO_BASE, 6 * 4); + MMIO_F(_MMIO(0xe4f00), 0x28); + MMIO_D(_MMIO(_PCH_TRANSACONF)); + MMIO_D(_MMIO(_PCH_TRANSBCONF)); + MMIO_D(FDI_RX_IIR(PIPE_A)); + MMIO_D(FDI_RX_IIR(PIPE_B)); + MMIO_D(FDI_RX_IIR(PIPE_C)); + MMIO_D(FDI_RX_IMR(PIPE_A)); + MMIO_D(FDI_RX_IMR(PIPE_B)); + MMIO_D(FDI_RX_IMR(PIPE_C)); + MMIO_D(FDI_RX_CTL(PIPE_A)); + MMIO_D(FDI_RX_CTL(PIPE_B)); + MMIO_D(FDI_RX_CTL(PIPE_C)); + MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A)); + MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A)); + MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A)); + MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A)); + MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A)); + MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B)); + MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B)); + MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B)); + MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B)); + 
MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2)); + MMIO_D(TRANS_DP_CTL(PIPE_A)); + MMIO_D(TRANS_DP_CTL(PIPE_B)); + MMIO_D(TRANS_DP_CTL(PIPE_C)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_A)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_A)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_A)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_B)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_B)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_B)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_C)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_C)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_C)); + MMIO_D(_MMIO(_FDI_RXA_MISC)); + MMIO_D(_MMIO(_FDI_RXB_MISC)); + MMIO_D(_MMIO(_FDI_RXA_TUSIZE1)); + MMIO_D(_MMIO(_FDI_RXA_TUSIZE2)); + MMIO_D(_MMIO(_FDI_RXB_TUSIZE1)); + MMIO_D(_MMIO(_FDI_RXB_TUSIZE2)); + MMIO_D(PCH_PP_CONTROL); + MMIO_D(PCH_PP_DIVISOR); + MMIO_D(PCH_PP_STATUS); + MMIO_D(PCH_LVDS); + MMIO_D(_MMIO(_PCH_DPLL_A)); + MMIO_D(_MMIO(_PCH_DPLL_B)); + MMIO_D(_MMIO(_PCH_FPA0)); + MMIO_D(_MMIO(_PCH_FPA1)); + MMIO_D(_MMIO(_PCH_FPB0)); + MMIO_D(_MMIO(_PCH_FPB1)); + MMIO_D(PCH_DREF_CONTROL); + MMIO_D(PCH_RAWCLK_FREQ); + MMIO_D(PCH_DPLL_SEL); + MMIO_D(_MMIO(0x61208)); + MMIO_D(_MMIO(0x6120c)); + MMIO_D(PCH_PP_ON_DELAYS); + MMIO_D(PCH_PP_OFF_DELAYS); + MMIO_D(_MMIO(0xe651c)); + MMIO_D(_MMIO(0xe661c)); + MMIO_D(_MMIO(0xe671c)); + MMIO_D(_MMIO(0xe681c)); + MMIO_D(_MMIO(0xe6c04)); + MMIO_D(_MMIO(0xe6e1c)); + MMIO_D(PCH_PORT_HOTPLUG); + MMIO_D(LCPLL_CTL); + MMIO_D(FUSE_STRAP); + MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL); + MMIO_D(DISP_ARB_CTL); + MMIO_D(DISP_ARB_CTL2); + MMIO_D(ILK_DISPLAY_CHICKEN1); + MMIO_D(ILK_DISPLAY_CHICKEN2); + MMIO_D(ILK_DSPCLK_GATE_D); + MMIO_D(SOUTH_CHICKEN1); + MMIO_D(SOUTH_CHICKEN2); + MMIO_D(_MMIO(_TRANSA_CHICKEN1)); + MMIO_D(_MMIO(_TRANSB_CHICKEN1)); + MMIO_D(SOUTH_DSPCLK_GATE_D); + MMIO_D(_MMIO(_TRANSA_CHICKEN2)); + MMIO_D(_MMIO(_TRANSB_CHICKEN2)); + MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A)); + MMIO_D(ILK_FBC_RT_BASE); + MMIO_D(IPS_CTL); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A)); + MMIO_D(PIPE_CSC_MODE(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B)); + MMIO_D(PIPE_CSC_MODE(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C)); + 
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C)); + MMIO_D(PIPE_CSC_MODE(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C)); + MMIO_D(PREC_PAL_INDEX(PIPE_A)); + MMIO_D(PREC_PAL_DATA(PIPE_A)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3); + MMIO_D(PREC_PAL_INDEX(PIPE_B)); + MMIO_D(PREC_PAL_DATA(PIPE_B)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3); + MMIO_D(PREC_PAL_INDEX(PIPE_C)); + MMIO_D(PREC_PAL_DATA(PIPE_C)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3); + MMIO_D(_MMIO(0x60110)); + MMIO_D(_MMIO(0x61110)); + MMIO_F(_MMIO(0x70400), 0x40); + MMIO_F(_MMIO(0x71400), 0x40); + MMIO_F(_MMIO(0x72400), 0x40); + MMIO_D(WM_LINETIME(PIPE_A)); + MMIO_D(WM_LINETIME(PIPE_B)); + MMIO_D(WM_LINETIME(PIPE_C)); + MMIO_D(SPLL_CTL); + MMIO_D(_MMIO(_WRPLL_CTL1)); + MMIO_D(_MMIO(_WRPLL_CTL2)); + MMIO_D(PORT_CLK_SEL(PORT_A)); + MMIO_D(PORT_CLK_SEL(PORT_B)); + MMIO_D(PORT_CLK_SEL(PORT_C)); + MMIO_D(PORT_CLK_SEL(PORT_D)); + MMIO_D(PORT_CLK_SEL(PORT_E)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_A)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_B)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_C)); + MMIO_D(HSW_NDE_RSTWRN_OPT); + MMIO_D(_MMIO(0x46508)); + MMIO_D(_MMIO(0x49080)); + MMIO_D(_MMIO(0x49180)); + MMIO_D(_MMIO(0x49280)); + MMIO_F(_MMIO(0x49090), 0x14); + MMIO_F(_MMIO(0x49190), 0x14); + MMIO_F(_MMIO(0x49290), 0x14); + MMIO_D(GAMMA_MODE(PIPE_A)); + MMIO_D(GAMMA_MODE(PIPE_B)); + MMIO_D(GAMMA_MODE(PIPE_C)); + MMIO_D(PIPE_MULT(PIPE_A)); + MMIO_D(PIPE_MULT(PIPE_B)); + MMIO_D(PIPE_MULT(PIPE_C)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C)); + MMIO_D(SFUSE_STRAP); + MMIO_D(SBI_ADDR); + MMIO_D(SBI_DATA); + MMIO_D(SBI_CTL_STAT); + MMIO_D(PIXCLK_GATE); + MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4); + MMIO_D(DDI_BUF_CTL(PORT_A)); + MMIO_D(DDI_BUF_CTL(PORT_B)); + MMIO_D(DDI_BUF_CTL(PORT_C)); + MMIO_D(DDI_BUF_CTL(PORT_D)); + MMIO_D(DDI_BUF_CTL(PORT_E)); + MMIO_D(DP_TP_CTL(PORT_A)); + MMIO_D(DP_TP_CTL(PORT_B)); + MMIO_D(DP_TP_CTL(PORT_C)); + MMIO_D(DP_TP_CTL(PORT_D)); + MMIO_D(DP_TP_CTL(PORT_E)); + MMIO_D(DP_TP_STATUS(PORT_A)); + MMIO_D(DP_TP_STATUS(PORT_B)); + MMIO_D(DP_TP_STATUS(PORT_C)); + MMIO_D(DP_TP_STATUS(PORT_D)); + MMIO_D(DP_TP_STATUS(PORT_E)); + MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50); + MMIO_F(_MMIO(0x64e60), 0x50); + MMIO_F(_MMIO(0x64eC0), 0x50); + MMIO_F(_MMIO(0x64f20), 0x50); + MMIO_F(_MMIO(0x64f80), 0x50); + MMIO_D(HSW_AUD_CFG(PIPE_A)); + MMIO_D(HSW_AUD_PIN_ELD_CP_VLD); + MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_A)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_B)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_C)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_EDP)); + MMIO_D(_MMIO(_TRANSA_MSA_MISC)); + MMIO_D(_MMIO(_TRANSB_MSA_MISC)); + MMIO_D(_MMIO(_TRANSC_MSA_MISC)); + MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC)); + MMIO_D(FORCEWAKE); + MMIO_D(FORCEWAKE_ACK); + MMIO_D(GEN6_GT_CORE_STATUS); + MMIO_D(GEN6_GT_THREAD_STATUS_REG); + MMIO_D(GTFIFODBG); + MMIO_D(GTFIFOCTL); + MMIO_D(ECOBUS); + MMIO_D(GEN6_RC_CONTROL); + MMIO_D(GEN6_RC_STATE); + MMIO_D(GEN6_RPNSWREQ); + MMIO_D(GEN6_RC_VIDEO_FREQ); + MMIO_D(GEN6_RP_DOWN_TIMEOUT); + MMIO_D(GEN6_RP_INTERRUPT_LIMITS); + MMIO_D(GEN6_RPSTAT1); + MMIO_D(GEN6_RP_CONTROL); + MMIO_D(GEN6_RP_UP_THRESHOLD); + MMIO_D(GEN6_RP_DOWN_THRESHOLD); + MMIO_D(GEN6_RP_CUR_UP_EI); + 
MMIO_D(GEN6_RP_CUR_UP); + MMIO_D(GEN6_RP_PREV_UP); + MMIO_D(GEN6_RP_CUR_DOWN_EI); + MMIO_D(GEN6_RP_CUR_DOWN); + MMIO_D(GEN6_RP_PREV_DOWN); + MMIO_D(GEN6_RP_UP_EI); + MMIO_D(GEN6_RP_DOWN_EI); + MMIO_D(GEN6_RP_IDLE_HYSTERSIS); + MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC_EVALUATION_INTERVAL); + MMIO_D(GEN6_RC_IDLE_HYSTERSIS); + MMIO_D(GEN6_RC_SLEEP); + MMIO_D(GEN6_RC1e_THRESHOLD); + MMIO_D(GEN6_RC6_THRESHOLD); + MMIO_D(GEN6_RC6p_THRESHOLD); + MMIO_D(GEN6_RC6pp_THRESHOLD); + MMIO_D(GEN6_PMINTRMSK); + + MMIO_D(RSTDBYCTL); + MMIO_D(GEN6_GDRST); + MMIO_F(FENCE_REG_GEN6_LO(0), 0x80); + MMIO_D(CPU_VGACNTRL); + MMIO_D(TILECTL); + MMIO_D(GEN6_UCGCTL1); + MMIO_D(GEN6_UCGCTL2); + MMIO_F(_MMIO(0x4f000), 0x90); + MMIO_D(GEN6_PCODE_DATA); + MMIO_D(_MMIO(0x13812c)); + MMIO_D(GEN7_ERR_INT); + MMIO_D(HSW_EDRAM_CAP); + MMIO_D(HSW_IDICR); + MMIO_D(GFX_FLSH_CNTL_GEN6); + MMIO_D(_MMIO(0x3c)); + MMIO_D(_MMIO(0x860)); + MMIO_D(ECOSKPD(RENDER_RING_BASE)); + MMIO_D(_MMIO(0x121d0)); + MMIO_D(ECOSKPD(BLT_RING_BASE)); + MMIO_D(_MMIO(0x41d0)); + MMIO_D(GAC_ECO_BITS); + MMIO_D(_MMIO(0x6200)); + MMIO_D(_MMIO(0x6204)); + MMIO_D(_MMIO(0x6208)); + MMIO_D(_MMIO(0x7118)); + MMIO_D(_MMIO(0x7180)); + MMIO_D(_MMIO(0x7408)); + MMIO_D(_MMIO(0x7c00)); + MMIO_D(GEN6_MBCTL); + MMIO_D(_MMIO(0x911c)); + MMIO_D(_MMIO(0x9120)); + MMIO_D(GEN7_UCGCTL4); + MMIO_D(GAB_CTL); + MMIO_D(_MMIO(0x48800)); + MMIO_D(_MMIO(0xce044)); + MMIO_D(_MMIO(0xe6500)); + MMIO_D(_MMIO(0xe6504)); + MMIO_D(_MMIO(0xe6600)); + MMIO_D(_MMIO(0xe6604)); + MMIO_D(_MMIO(0xe6700)); + MMIO_D(_MMIO(0xe6704)); + MMIO_D(_MMIO(0xe6800)); + MMIO_D(_MMIO(0xe6804)); + MMIO_D(PCH_GMBUS4); + MMIO_D(PCH_GMBUS5); + MMIO_D(_MMIO(0x902c)); + MMIO_D(_MMIO(0xec008)); + MMIO_D(_MMIO(0xec00c)); + MMIO_D(_MMIO(0xec008 + 0x18)); + MMIO_D(_MMIO(0xec00c + 0x18)); + MMIO_D(_MMIO(0xec008 + 0x18 * 2)); + MMIO_D(_MMIO(0xec00c + 0x18 * 2)); + MMIO_D(_MMIO(0xec008 + 0x18 * 3)); + MMIO_D(_MMIO(0xec00c + 0x18 * 3)); + MMIO_D(_MMIO(0xec408)); + MMIO_D(_MMIO(0xec40c)); + MMIO_D(_MMIO(0xec408 + 0x18)); + MMIO_D(_MMIO(0xec40c + 0x18)); + MMIO_D(_MMIO(0xec408 + 0x18 * 2)); + MMIO_D(_MMIO(0xec40c + 0x18 * 2)); + MMIO_D(_MMIO(0xec408 + 0x18 * 3)); + MMIO_D(_MMIO(0xec40c + 0x18 * 3)); + MMIO_D(_MMIO(0xfc810)); + MMIO_D(_MMIO(0xfc81c)); + MMIO_D(_MMIO(0xfc828)); + MMIO_D(_MMIO(0xfc834)); + MMIO_D(_MMIO(0xfcc00)); + MMIO_D(_MMIO(0xfcc0c)); + MMIO_D(_MMIO(0xfcc18)); + MMIO_D(_MMIO(0xfcc24)); + MMIO_D(_MMIO(0xfd000)); + MMIO_D(_MMIO(0xfd00c)); + MMIO_D(_MMIO(0xfd018)); + MMIO_D(_MMIO(0xfd024)); + MMIO_D(_MMIO(0xfd034)); + MMIO_D(FPGA_DBG); + MMIO_D(_MMIO(0x2054)); + MMIO_D(_MMIO(0x12054)); + MMIO_D(_MMIO(0x22054)); + MMIO_D(_MMIO(0x1a054)); + MMIO_D(_MMIO(0x44070)); + MMIO_D(_MMIO(0x2178)); + MMIO_D(_MMIO(0x217c)); + MMIO_D(_MMIO(0x12178)); + MMIO_D(_MMIO(0x1217c)); + MMIO_F(_MMIO(0x5200), 32); + MMIO_F(_MMIO(0x5240), 32); + MMIO_F(_MMIO(0x5280), 16); + MMIO_D(BCS_SWCTRL); + MMIO_F(HS_INVOCATION_COUNT, 8); + MMIO_F(DS_INVOCATION_COUNT, 8); + MMIO_F(IA_VERTICES_COUNT, 8); + MMIO_F(IA_PRIMITIVES_COUNT, 8); + MMIO_F(VS_INVOCATION_COUNT, 8); + MMIO_F(GS_INVOCATION_COUNT, 8); + MMIO_F(GS_PRIMITIVES_COUNT, 8); + MMIO_F(CL_INVOCATION_COUNT, 8); + MMIO_F(CL_PRIMITIVES_COUNT, 8); + MMIO_F(PS_INVOCATION_COUNT, 8); + MMIO_F(PS_DEPTH_COUNT, 8); + MMIO_D(ARB_MODE); + MMIO_RING_D(RING_BBADDR); + MMIO_D(_MMIO(0x2220)); + MMIO_D(_MMIO(0x12220)); + MMIO_D(_MMIO(0x22220)); + MMIO_RING_D(RING_SYNC_1); + MMIO_RING_D(RING_SYNC_0); + MMIO_D(GUC_STATUS); 
+ + MMIO_F(_MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000); + MMIO_F(_MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE); + MMIO_F(LGC_PALETTE(PIPE_A, 0), 1024); + MMIO_F(LGC_PALETTE(PIPE_B, 0), 1024); + MMIO_F(LGC_PALETTE(PIPE_C, 0), 1024); + + return 0; +} + +static int iterate_bdw_only_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + MMIO_D(HSW_PWR_WELL_CTL1); + MMIO_D(HSW_PWR_WELL_CTL2); + MMIO_D(HSW_PWR_WELL_CTL3); + MMIO_D(HSW_PWR_WELL_CTL4); + MMIO_D(HSW_PWR_WELL_CTL5); + MMIO_D(HSW_PWR_WELL_CTL6); + + MMIO_D(WM_MISC); + MMIO_D(_MMIO(_SRD_CTL_EDP)); + + MMIO_D(_MMIO(0xb1f0)); + MMIO_D(_MMIO(0xb1c0)); + MMIO_D(_MMIO(0xb100)); + MMIO_D(_MMIO(0xb10c)); + MMIO_D(_MMIO(0xb110)); + MMIO_D(_MMIO(0x83a4)); + MMIO_D(_MMIO(0x8430)); + MMIO_D(_MMIO(0x2248)); + MMIO_D(FORCEWAKE_ACK_HSW); + + return 0; +} + +static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_D(GEN8_GT_IMR(0)); + MMIO_D(GEN8_GT_IER(0)); + MMIO_D(GEN8_GT_IIR(0)); + MMIO_D(GEN8_GT_ISR(0)); + MMIO_D(GEN8_GT_IMR(1)); + MMIO_D(GEN8_GT_IER(1)); + MMIO_D(GEN8_GT_IIR(1)); + MMIO_D(GEN8_GT_ISR(1)); + MMIO_D(GEN8_GT_IMR(2)); + MMIO_D(GEN8_GT_IER(2)); + MMIO_D(GEN8_GT_IIR(2)); + MMIO_D(GEN8_GT_ISR(2)); + MMIO_D(GEN8_GT_IMR(3)); + MMIO_D(GEN8_GT_IER(3)); + MMIO_D(GEN8_GT_IIR(3)); + MMIO_D(GEN8_GT_ISR(3)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C)); + MMIO_D(GEN8_DE_PORT_IMR); + MMIO_D(GEN8_DE_PORT_IER); + MMIO_D(GEN8_DE_PORT_IIR); + MMIO_D(GEN8_DE_PORT_ISR); + MMIO_D(GEN8_DE_MISC_IMR); + MMIO_D(GEN8_DE_MISC_IER); + MMIO_D(GEN8_DE_MISC_IIR); + MMIO_D(GEN8_DE_MISC_ISR); + MMIO_D(GEN8_PCU_IMR); + MMIO_D(GEN8_PCU_IER); + MMIO_D(GEN8_PCU_IIR); + MMIO_D(GEN8_PCU_ISR); + MMIO_D(GEN8_MASTER_IRQ); + MMIO_RING_D(RING_ACTHD_UDW); + +#define RING_REG(base) _MMIO((base) + 0xd0) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x230) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x234) + MMIO_RING_F(RING_REG, 8); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x244) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x370) + MMIO_RING_F(RING_REG, 48); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x3a0) + MMIO_RING_D(RING_REG); +#undef RING_REG + + MMIO_D(PIPEMISC(PIPE_A)); + MMIO_D(PIPEMISC(PIPE_B)); + MMIO_D(PIPEMISC(PIPE_C)); + MMIO_D(_MMIO(0x1c1d0)); + MMIO_D(GEN6_MBCUNIT_SNPCR); + MMIO_D(GEN7_MISCCPCTL); + MMIO_D(_MMIO(0x1c054)); + MMIO_D(GEN6_PCODE_MAILBOX); + if (!IS_BROXTON(dev_priv)) + MMIO_D(GEN8_PRIVATE_PAT_LO); + MMIO_D(GEN8_PRIVATE_PAT_HI); + MMIO_D(GAMTARBMODE); + +#define RING_REG(base) _MMIO((base) + 0x270) + MMIO_RING_F(RING_REG, 32); +#undef RING_REG + + MMIO_RING_D(RING_HWS_PGA); + MMIO_D(HDC_CHICKEN0); + MMIO_D(CHICKEN_PIPESL_1(PIPE_A)); + MMIO_D(CHICKEN_PIPESL_1(PIPE_B)); + MMIO_D(CHICKEN_PIPESL_1(PIPE_C)); + MMIO_D(_MMIO(0x6671c)); + MMIO_D(_MMIO(0x66c00)); + MMIO_D(_MMIO(0x66c04)); + MMIO_D(HSW_GTT_CACHE_EN); + MMIO_D(GEN8_EU_DISABLE0); + MMIO_D(GEN8_EU_DISABLE1); + MMIO_D(GEN8_EU_DISABLE2); + MMIO_D(_MMIO(0xfdc)); + MMIO_D(GEN8_ROW_CHICKEN); + 
MMIO_D(GEN7_ROW_CHICKEN2); + MMIO_D(GEN8_UCGCTL6); + MMIO_D(GEN8_L3SQCREG4); + MMIO_D(GEN9_SCRATCH_LNCF1); + MMIO_F(_MMIO(0x24d0), 48); + MMIO_D(_MMIO(0x44484)); + MMIO_D(_MMIO(0x4448c)); + MMIO_D(GEN8_L3_LRA_1_GPGPU); + MMIO_D(_MMIO(0x110000)); + MMIO_D(_MMIO(0x48400)); + MMIO_D(_MMIO(0x6e570)); + MMIO_D(_MMIO(0x65f10)); + MMIO_D(_MMIO(0xe194)); + MMIO_D(_MMIO(0xe188)); + MMIO_D(HALF_SLICE_CHICKEN2); + MMIO_D(_MMIO(0x2580)); + MMIO_D(_MMIO(0xe220)); + MMIO_D(_MMIO(0xe230)); + MMIO_D(_MMIO(0xe240)); + MMIO_D(_MMIO(0xe260)); + MMIO_D(_MMIO(0xe270)); + MMIO_D(_MMIO(0xe280)); + MMIO_D(_MMIO(0xe2a0)); + MMIO_D(_MMIO(0xe2b0)); + MMIO_D(_MMIO(0xe2c0)); + MMIO_D(_MMIO(0x21f0)); + MMIO_D(GEN8_GAMW_ECO_DEV_RW_IA); + MMIO_D(_MMIO(0x215c)); + MMIO_F(_MMIO(0x2290), 8); + MMIO_D(_MMIO(0x2b00)); + MMIO_D(_MMIO(0x2360)); + MMIO_D(_MMIO(0x1c17c)); + MMIO_D(_MMIO(0x1c178)); + MMIO_D(_MMIO(0x4260)); + MMIO_D(_MMIO(0x4264)); + MMIO_D(_MMIO(0x4268)); + MMIO_D(_MMIO(0x426c)); + MMIO_D(_MMIO(0x4270)); + MMIO_D(_MMIO(0x4094)); + MMIO_D(_MMIO(0x22178)); + MMIO_D(_MMIO(0x1a178)); + MMIO_D(_MMIO(0x1a17c)); + MMIO_D(_MMIO(0x2217c)); + MMIO_D(EDP_PSR_IMR); + MMIO_D(EDP_PSR_IIR); + MMIO_D(_MMIO(0xe4cc)); + MMIO_D(GEN7_SC_INSTDONE); + + return 0; +} + +static int iterate_pre_skl_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + MMIO_D(FORCEWAKE_MT); + + MMIO_D(PCH_ADPA); + MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4); + MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4); + MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4); + + MMIO_F(_MMIO(0x70440), 0xc); + MMIO_F(_MMIO(0x71440), 0xc); + MMIO_F(_MMIO(0x72440), 0xc); + MMIO_F(_MMIO(0x7044c), 0xc); + MMIO_F(_MMIO(0x7144c), 0xc); + MMIO_F(_MMIO(0x7244c), 0xc); + + return 0; +} + +static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_D(FORCEWAKE_RENDER_GEN9); + MMIO_D(FORCEWAKE_ACK_RENDER_GEN9); + MMIO_D(FORCEWAKE_GT_GEN9); + MMIO_D(FORCEWAKE_ACK_GT_GEN9); + MMIO_D(FORCEWAKE_MEDIA_GEN9); + MMIO_D(FORCEWAKE_ACK_MEDIA_GEN9); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4); + MMIO_D(HSW_PWR_WELL_CTL1); + MMIO_D(HSW_PWR_WELL_CTL2); + MMIO_D(DBUF_CTL_S(0)); + MMIO_D(GEN9_PG_ENABLE); + MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS); + MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS); + MMIO_D(GEN9_GAMT_ECO_REG_RW_IA); + MMIO_D(MMCD_MISC_CTRL); + MMIO_D(CHICKEN_PAR1_1); + MMIO_D(DC_STATE_EN); + MMIO_D(DC_STATE_DEBUG); + MMIO_D(CDCLK_CTL); + MMIO_D(LCPLL1_CTL); + MMIO_D(LCPLL2_CTL); + MMIO_D(_MMIO(_DPLL1_CFGCR1)); + MMIO_D(_MMIO(_DPLL2_CFGCR1)); + MMIO_D(_MMIO(_DPLL3_CFGCR1)); + MMIO_D(_MMIO(_DPLL1_CFGCR2)); + MMIO_D(_MMIO(_DPLL2_CFGCR2)); + MMIO_D(_MMIO(_DPLL3_CFGCR2)); + MMIO_D(DPLL_CTRL1); + MMIO_D(DPLL_CTRL2); + MMIO_D(DPLL_STATUS); + MMIO_D(SKL_PS_WIN_POS(PIPE_A, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_A, 1)); + MMIO_D(SKL_PS_WIN_POS(PIPE_B, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_B, 1)); + MMIO_D(SKL_PS_WIN_POS(PIPE_C, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_C, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_A, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_A, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_B, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_B, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_C, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_C, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 1)); + 
MMIO_D(PLANE_BUF_CFG(PIPE_A, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 3)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 3)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 3)); + MMIO_D(CUR_BUF_CFG(PIPE_A)); + MMIO_D(CUR_BUF_CFG(PIPE_B)); + MMIO_D(CUR_BUF_CFG(PIPE_C)); + MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 2)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 2)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 2)); + MMIO_D(CUR_WM_TRANS(PIPE_A)); + MMIO_D(CUR_WM_TRANS(PIPE_B)); + MMIO_D(CUR_WM_TRANS(PIPE_C)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 3)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 3)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 3)); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 4))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 4))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 4))); + MMIO_D(_MMIO(_PLANE_CTL_3_A)); + MMIO_D(_MMIO(_PLANE_CTL_3_B)); + MMIO_D(_MMIO(0x72380)); + MMIO_D(_MMIO(0x7239c)); + MMIO_D(_MMIO(_PLANE_SURF_3_A)); + MMIO_D(_MMIO(_PLANE_SURF_3_B)); + MMIO_D(DMC_SSP_BASE); + MMIO_D(DMC_HTP_SKL); + MMIO_D(DMC_LAST_WRITE); + MMIO_D(BDW_SCRATCH1); + MMIO_D(SKL_DFSM); + MMIO_D(DISPIO_CR_TX_BMU_CR0); + MMIO_F(GEN9_GFX_MOCS(0), 0x7f8); + MMIO_F(GEN7_L3CNTLREG2, 0x80); + MMIO_D(RPM_CONFIG0); + MMIO_D(_MMIO(0xd08)); + MMIO_D(RC6_LOCATION); + MMIO_D(GEN7_FF_SLICE_CS_CHICKEN1); + MMIO_D(GEN9_CS_DEBUG_MODE1); + /* TRTT */ + MMIO_D(TRVATTL3PTRDW(0)); + MMIO_D(TRVATTL3PTRDW(1)); + MMIO_D(TRVATTL3PTRDW(2)); + MMIO_D(TRVATTL3PTRDW(3)); + MMIO_D(TRVADR); + MMIO_D(TRTTE); + MMIO_D(_MMIO(0x4dfc)); + 
MMIO_D(_MMIO(0x46430)); + MMIO_D(_MMIO(0x46520)); + MMIO_D(_MMIO(0xc403c)); + MMIO_D(GEN8_GARBCNTL); + MMIO_D(DMA_CTRL); + MMIO_D(_MMIO(0x65900)); + MMIO_D(GEN6_STOLEN_RESERVED); + MMIO_D(_MMIO(0x4068)); + MMIO_D(_MMIO(0x67054)); + MMIO_D(_MMIO(0x6e560)); + MMIO_D(_MMIO(0x6e554)); + MMIO_D(_MMIO(0x2b20)); + MMIO_D(_MMIO(0x65f00)); + MMIO_D(_MMIO(0x65f08)); + MMIO_D(_MMIO(0x320f0)); + MMIO_D(_MMIO(0x70034)); + MMIO_D(_MMIO(0x71034)); + MMIO_D(_MMIO(0x72034)); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C))); + MMIO_D(_MMIO(0x44500)); +#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) + MMIO_RING_D(CSFE_CHICKEN1_REG); +#undef CSFE_CHICKEN1_REG + MMIO_D(GEN8_HDC_CHICKEN1); + MMIO_D(GEN9_WM_CHICKEN3); + + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + MMIO_D(GAMT_CHKN_BIT_REG); + if (!IS_BROXTON(dev_priv)) + MMIO_D(GEN9_CTX_PREEMPT_REG); + MMIO_F(_MMIO(DMC_MMIO_START_RANGE), 0x3000); + return 0; +} + +static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_F(_MMIO(0x80000), 0x3000); + MMIO_D(GEN7_SAMPLER_INSTDONE); + MMIO_D(GEN7_ROW_INSTDONE); + MMIO_D(GEN8_FAULT_TLB_DATA0); + MMIO_D(GEN8_FAULT_TLB_DATA1); + MMIO_D(ERROR_GEN6); + MMIO_D(DONE_REG); + MMIO_D(EIR); + MMIO_D(PGTBL_ER); + MMIO_D(_MMIO(0x4194)); + MMIO_D(_MMIO(0x4294)); + MMIO_D(_MMIO(0x4494)); + MMIO_RING_D(RING_PSMI_CTL); + MMIO_RING_D(RING_DMA_FADD); + MMIO_RING_D(RING_DMA_FADD_UDW); + MMIO_RING_D(RING_IPEHR); + MMIO_RING_D(RING_INSTPS); + MMIO_RING_D(RING_BBADDR_UDW); + MMIO_RING_D(RING_BBSTATE); + MMIO_RING_D(RING_IPEIR); + MMIO_F(SOFT_SCRATCH(0), 16 * 4); + MMIO_D(BXT_P_CR_GT_DISP_PWRON); + MMIO_D(BXT_RP_STATE_CAP); + MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY0)); + MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY1)); + MMIO_D(BXT_PHY_CTL(PORT_A)); + MMIO_D(BXT_PHY_CTL(PORT_B)); + MMIO_D(BXT_PHY_CTL(PORT_C)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_A)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_B)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_C)); + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0)); + 
MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10)); + MMIO_D(BXT_DE_PLL_CTL); + MMIO_D(BXT_DE_PLL_ENABLE); + MMIO_D(BXT_DSI_PLL_CTL); + MMIO_D(BXT_DSI_PLL_ENABLE); + MMIO_D(GEN9_CLKGATE_DIS_0); + 
MMIO_D(GEN9_CLKGATE_DIS_4); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B)); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C)); + MMIO_D(RC6_CTX_BASE); + MMIO_D(GEN8_PUSHBUS_CONTROL); + MMIO_D(GEN8_PUSHBUS_ENABLE); + MMIO_D(GEN8_PUSHBUS_SHIFT); + MMIO_D(GEN6_GFXPAUSE); + MMIO_D(GEN8_L3SQCREG1); + MMIO_D(GEN8_L3CNTLREG); + MMIO_D(_MMIO(0x20D8)); + MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40); + MMIO_D(GEN9_CTX_PREEMPT_REG); + MMIO_D(GEN8_PRIVATE_PAT_LO); + + return 0; +} + +/** + * intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table + * @iter: the iterator + * + * This function is called for iterating the GVT MMIO table when i915 is + * taking the snapshot of the HW and GVT is building the MMIO tracking table. + */ +int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *i915 = iter->i915; + int ret; + + ret = iterate_generic_mmio(iter); + if (ret) + goto err; + + if (IS_BROADWELL(i915)) { + ret = iterate_bdw_only_mmio(iter); + if (ret) + goto err; + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_pre_skl_mmio(iter); + if (ret) + goto err; + } else if (IS_SKYLAKE(i915) || + IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)) { + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_skl_plus_mmio(iter); + if (ret) + goto err; + } else if (IS_BROXTON(i915)) { + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_skl_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_bxt_mmio(iter); + if (ret) + goto err; + } + + return 0; +err: + return ret; +} From 66e7a8063381cb2f568cd3436df2f0ec239a84f9 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 7 Apr 2022 03:19:44 -0400 Subject: [PATCH 140/219] i915/gvt: Save the initial HW state snapshot in i915 Save the initial HW state snapshot in i915 so that the rest of the GVT-g code can be moved into a dedicated module while still getting a clean initial HW state saved at the correct time during the initialization of i915. The vGPUs created later by GVT-g will use this HW state as their initial HW state. v6: - Remove the reference of intel_gvt_device_info. (Christoph) - Refine the save_mmio() function.
(Christoph) Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Vivi Rodrigo Cc: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhi Wang Reviewed-by: Christoph Hellwig Tested-by: Christoph Hellwig Reviewed-by: Zhenyu Wang Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20220407071945.72148-3-zhi.a.wang@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/intel_gvt.c | 92 +++++++++++++++++++++++++++++++- 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fa14da84362e..0830020c11c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -432,6 +432,8 @@ struct i915_virtual_gpu { struct mutex lock; /* serialises sending of g2v_notify command pkts */ bool active; u32 caps; + u32 *initial_mmio; + u8 *initial_cfg_space; }; struct i915_selftest_stash { diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index cf6e98962d82..65daab2c4d9e 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -86,6 +86,85 @@ bail: dev_priv->params.enable_gvt = 0; } +static void free_initial_hw_state(struct drm_i915_private *dev_priv) +{ + struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; + + vfree(vgpu->initial_mmio); + vgpu->initial_mmio = NULL; + + kfree(vgpu->initial_cfg_space); + vgpu->initial_cfg_space = NULL; +} + +static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, + u32 size) +{ + struct drm_i915_private *dev_priv = iter->i915; + u32 *mmio, i; + + for (i = offset; i < offset + size; i += 4) { + mmio = iter->data + i; + *mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore, + _MMIO(i)); + } +} + +static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size) +{ + if (WARN_ON(!IS_ALIGNED(offset, 4))) + return -EINVAL; + + save_mmio(iter, offset, size); + return 0; +} + +static int save_initial_hw_state(struct drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; + struct intel_gvt_mmio_table_iter iter; + void *mem; + int i, ret; + + mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + vgpu->initial_cfg_space = mem; + + for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4) + pci_read_config_dword(pdev, i, mem + i); + + mem = vzalloc(2 * SZ_1M); + if (!mem) { + ret = -ENOMEM; + goto err_mmio; + } + + vgpu->initial_mmio = mem; + + iter.i915 = dev_priv; + iter.data = vgpu->initial_mmio; + iter.handle_mmio_cb = handle_mmio; + + ret = intel_gvt_iterate_mmio_table(&iter); + if (ret) + goto err_iterate; + + return 0; + +err_iterate: + vfree(vgpu->initial_mmio); + vgpu->initial_mmio = NULL; +err_mmio: + kfree(vgpu->initial_cfg_space); + vgpu->initial_cfg_space = NULL; + + return ret; +} + /** * intel_gvt_init - initialize GVT components * @dev_priv: drm i915 private data @@ -115,15 +194,23 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return -EIO; } + ret = save_initial_hw_state(dev_priv); + if (ret) { + drm_dbg(&dev_priv->drm, "Fail to save initial HW state\n"); + goto err_save_hw_state; + } + ret = intel_gvt_init_device(dev_priv); if (ret) { drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); - goto bail; + goto err_init_device; } return 0; -bail: +err_init_device: + free_initial_hw_state(dev_priv); +err_save_hw_state: dev_priv->params.enable_gvt = 0; return 0; } @@ -147,6 +234,7 @@ void 
intel_gvt_driver_remove(struct drm_i915_private *dev_priv) return; intel_gvt_clean_device(dev_priv); + free_initial_hw_state(dev_priv); } /** From 1672991412dfef000c9f9271558a3713081a4c57 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 7 Apr 2022 03:19:45 -0400 Subject: [PATCH 141/219] i915/gvt: Use the initial HW state snapshot saved in i915 The code of saving initial HW state snapshot has been moved into i915. Let the GVT-g core logic use that snapshot. Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Vivi Rodrigo Cc: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhi Wang Reviewed-by: Christoph Hellwig Tested-by: Christoph Hellwig Reviewed-by: Zhenyu Wang Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20220407071945.72148-4-zhi.a.wang@intel.com --- drivers/gpu/drm/i915/gvt/firmware.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 1a8274a3f4b1..54fe442238c6 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = { .mmap = NULL, }; -static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) -{ - *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore, - _MMIO(offset)); - return 0; -} - static int expose_firmware_sysfs(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); + struct drm_i915_private *i915 = gvt->gt->i915; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct gvt_firmware_header *h; void *firmware; void *p; unsigned long size, crc32_start; - int i, ret; + int ret; size = sizeof(*h) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); @@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) p = firmware + h->cfg_space_offset; - for (i = 0; i < h->cfg_space_size; i += 4) - pci_read_config_dword(pdev, i, p + i); - - memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size); + memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space, + info->cfg_space_size); + memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size); p = firmware + h->mmio_offset; - /* Take a snapshot of hw mmio registers. */ - intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p); + memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio, + info->mmio_size); - memcpy(gvt->firmware.mmio, p, info->mmio_size); + memcpy(p, gvt->firmware.mmio, info->mmio_size); crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); From de5d437ae8696ab958903ac199c56c939036e3ea Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 13 Apr 2022 15:25:38 +0300 Subject: [PATCH 142/219] drm/i915/gvt: fix trace TRACE_INCLUDE_PATH TRACE_INCLUDE_PATH should be a path relative to define_trace.h, not the file including it. (See the comment in include/trace/define_trace.h.) 
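As a quick illustration of that convention (a sketch only; the wrapping comment and the final include are the usual trace-header boilerplate, and the gvt path is the one set by the change below): since define_trace.h lives in include/trace/, a trace header outside the core include tree ends with something like

/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>

so the path resolves from include/trace/ back to the directory that actually holds trace.h.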
Cc: Zhi Wang Cc: Christoph Hellwig Signed-off-by: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/48b772795b7ab674f609ecad53b4882c66a8262a.1649852517.git.jani.nikula@intel.com Reviewed-by: Zhi Wang Reviewed-by: Christoph Hellwig --- drivers/gpu/drm/i915/gvt/trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index 6d787750d279..020f1aa28322 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h @@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio, /* This part must be out of protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt #define TRACE_INCLUDE_FILE trace #include From 7f0cf30187cdb1f04d905505ffde910cecf1b35e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 13 Apr 2022 15:25:39 +0300 Subject: [PATCH 143/219] drm/i915/gvt: better align the Makefile with i915 Makefile Drop extra ccflags, drop extra intermediate variables, list object files one per line alphabetically. Cc: Zhi Wang Cc: Christoph Hellwig Signed-off-by: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/8bc0895376c077156a671e24ac6a5c75b7db4c9c.1649852517.git.jani.nikula@intel.com Reviewed-by: Zhi Wang Reviewed-by: Christoph Hellwig --- drivers/gpu/drm/i915/Makefile | 6 +++--- drivers/gpu/drm/i915/gvt/Makefile | 30 +++++++++++++++++++++++------- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a6cab2d9a9d1..95a449b3508b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -320,10 +320,10 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ # virtual gpu code i915-y += i915_vgpu.o -ifeq ($(CONFIG_DRM_I915_GVT),y) -i915-y += intel_gvt.o intel_gvt_mmio_table.o +i915-$(CONFIG_DRM_I915_GVT) += \ + intel_gvt.o \ + intel_gvt_mmio_table.o include $(src)/gvt/Makefile -endif obj-$(CONFIG_DRM_I915) += i915.o obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index ea8324abc784..584661047945 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -1,9 +1,25 @@ # SPDX-License-Identifier: GPL-2.0 -GVT_DIR := gvt -GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ - interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ - execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \ - fb_decoder.o dmabuf.o page_track.o -ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/ -i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) +i915-$(CONFIG_DRM_I915_GVT) += \ + gvt/aperture_gm.o \ + gvt/cfg_space.o \ + gvt/cmd_parser.o \ + gvt/debugfs.o \ + gvt/display.o \ + gvt/dmabuf.o \ + gvt/edid.o \ + gvt/execlist.o \ + gvt/fb_decoder.o \ + gvt/firmware.o \ + gvt/gtt.o \ + gvt/gvt.o \ + gvt/handlers.o \ + gvt/interrupt.o \ + gvt/mmio.o \ + gvt/mmio_context.o \ + gvt/opregion.o \ + gvt/page_track.o \ + gvt/sched_policy.o \ + gvt/scheduler.o \ + gvt/trace_points.o \ + gvt/vgpu.o From a85749e12d66c2cd89d1bce05ef9abca8b5875e9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:30 +0200 Subject: [PATCH 144/219] drm/i915/gvt: remove module refcounting in intel_gvt_{,un}register_hypervisor THIS_MODULE always is reference when a symbol called by it is used, so don't 
bother with the additional reference. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-2-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index f0b69e4dcb52..623424766c35 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -308,10 +308,6 @@ intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) m->type != INTEL_GVT_HYPERVISOR_XEN) return -EINVAL; - /* Get a reference for device model module */ - if (!try_module_get(THIS_MODULE)) - return -ENODEV; - intel_gvt_host.mpt = m; intel_gvt_host.hypervisor_type = m->type; gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; @@ -321,7 +317,6 @@ intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) if (ret < 0) { gvt_err("Failed to init %s hypervisor module\n", supported_hypervisors[intel_gvt_host.hypervisor_type]); - module_put(THIS_MODULE); return -ENODEV; } gvt_dbg_core("Running with hypervisor %s in host mode\n", @@ -335,6 +330,5 @@ intel_gvt_unregister_hypervisor(void) { void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt); - module_put(THIS_MODULE); } EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor); From 367748066eeb378bcb1399f1cfa6675c76afc9e1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:31 +0200 Subject: [PATCH 145/219] drm/i915/gvt: remove enum hypervisor_type The only supported hypervisor is KVM, so don't bother with dead code enumerating hypervisors. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-3-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 17 +-- drivers/gpu/drm/i915/gvt/gvt.h | 1 - drivers/gpu/drm/i915/gvt/hypercall.h | 6 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 1 - drivers/gpu/drm/i915/gvt/opregion.c | 150 ++++++--------------------- 5 files changed, 34 insertions(+), 141 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 623424766c35..b4b13e4a94e3 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -41,11 +41,6 @@ struct intel_gvt_host intel_gvt_host; -static const char * const supported_hypervisors[] = { - [INTEL_GVT_HYPERVISOR_XEN] = "XEN", - [INTEL_GVT_HYPERVISOR_KVM] = "KVM", -}; - static const struct intel_gvt_ops intel_gvt_ops = { .emulate_cfg_read = intel_vgpu_emulate_cfg_read, .emulate_cfg_write = intel_vgpu_emulate_cfg_write, @@ -304,23 +299,13 @@ intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) if (!intel_gvt_host.initialized) return -ENODEV; - if (m->type != INTEL_GVT_HYPERVISOR_KVM && - m->type != INTEL_GVT_HYPERVISOR_XEN) - return -EINVAL; - intel_gvt_host.mpt = m; - intel_gvt_host.hypervisor_type = m->type; gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt, &intel_gvt_ops); - if (ret < 0) { - gvt_err("Failed to init %s hypervisor module\n", - supported_hypervisors[intel_gvt_host.hypervisor_type]); + if (ret < 0) return -ENODEV; - } - gvt_dbg_core("Running with hypervisor %s in host mode\n", - supported_hypervisors[intel_gvt_host.hypervisor_type]); return 0; } EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h 
b/drivers/gpu/drm/i915/gvt/gvt.h index bfe07c69cfd2..554baf26cab3 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -60,7 +60,6 @@ struct intel_gvt_host { struct device *dev; bool initialized; - int hypervisor_type; const struct intel_gvt_mpt *mpt; }; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index f33e3cbd0439..317983153645 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -37,17 +37,11 @@ struct device; -enum hypervisor_type { - INTEL_GVT_HYPERVISOR_XEN = 0, - INTEL_GVT_HYPERVISOR_KVM, -}; - /* * Specific GVT-g MPT modules function collections. Currently GVT-g supports * both Xen and KVM by providing dedicated hypervisor-related MPT modules. */ struct intel_gvt_mpt { - enum hypervisor_type type; int (*host_init)(struct device *dev, void *gvt, const void *ops); void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 057ec4490104..5231ce8084b3 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -2221,7 +2221,6 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) } static const struct intel_gvt_mpt kvmgt_mpt = { - .type = INTEL_GVT_HYPERVISOR_KVM, .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, .attach_vgpu = kvmgt_attach_vgpu, diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 33569b910ed5..286ac6d7c6ce 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) return 0; } -static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) -{ - u64 mfn; - int i, ret; - - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { - mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va - + i * PAGE_SIZE); - if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("fail to get MFN from VA\n"); - return -EINVAL; - } - ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, - vgpu_opregion(vgpu)->gfn[i], - mfn, 1, map); - if (ret) { - gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n", - ret); - return ret; - } - } - - vgpu_opregion(vgpu)->mapped = map; - - return 0; -} - /** * intel_vgpu_opregion_base_write_handler - Opregion base register write handler * @@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa) { - int i, ret = 0; + int i; gvt_dbg_core("emulate opregion from kernel\n"); - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_KVM: - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) - vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - break; - case INTEL_GVT_HYPERVISOR_XEN: - /** - * Wins guest on Xengt will write this register twice: xen - * hvmloader and windows graphic driver. 
- */ - if (vgpu_opregion(vgpu)->mapped) - map_vgpu_opregion(vgpu, false); - - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) - vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - - ret = map_vgpu_opregion(vgpu, true); - break; - default: - ret = -EINVAL; - gvt_vgpu_err("not supported hypervisor\n"); - } - - return ret; + for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) + vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; + return 0; } /** @@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (!vgpu_opregion(vgpu)->va) return; - if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { - if (vgpu_opregion(vgpu)->mapped) - map_vgpu_opregion(vgpu, false); - } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { - /* Guest opregion is released by VFIO */ - } + /* Guest opregion is released by VFIO */ free_pages((unsigned long)vgpu_opregion(vgpu)->va, get_order(INTEL_GVT_OPREGION_SIZE)); @@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) u64 scic_pa = 0, parm_pa = 0; int ret; - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_XEN: - scic = *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_SCIC); - parm = *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_PARM); - break; - case INTEL_GVT_HYPERVISOR_KVM: - scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + - INTEL_GVT_OPREGION_SCIC; - parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + - INTEL_GVT_OPREGION_PARM; + scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_SCIC; + parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_PARM; + ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } - ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, - &scic, sizeof(scic)); - if (ret) { - gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, - &parm, sizeof(parm)); - if (ret) { - gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - break; - default: - gvt_vgpu_err("not supported hypervisor\n"); - return -EINVAL; + ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; } if (!(swsci & SWSCI_SCI_SELECT)) { @@ -535,34 +465,20 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) parm = 0; out: - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_XEN: - *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_SCIC) = scic; - *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_PARM) = parm; - break; - case INTEL_GVT_HYPERVISOR_KVM: - ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, - &scic, sizeof(scic)); - if (ret) { - gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } + ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, &scic, + sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } - ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, - &parm, sizeof(parm)); - if (ret) { - 
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - break; - default: - gvt_vgpu_err("not supported hypervisor\n"); - return -EINVAL; + ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, &parm, + sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; } return 0; From f49fc35799fa63e149ad79f4250a655edfac57a2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:32 +0200 Subject: [PATCH 146/219] drm/i915/gvt: rename intel_vgpu_ops to intel_vgpu_mdev_ops Free the intel_vgpu_ops symbol name for something that fits better. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-4-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 5231ce8084b3..e3f0c555ed5f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1765,7 +1765,7 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; -static struct mdev_parent_ops intel_vgpu_ops = { +static struct mdev_parent_ops intel_vgpu_mdev_ops = { .mdev_attr_groups = intel_vgpu_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1788,9 +1788,9 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) return ret; intel_gvt_ops = ops; - intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups; + intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups; - ret = mdev_register_device(dev, &intel_vgpu_ops); + ret = mdev_register_device(dev, &intel_vgpu_mdev_ops); if (ret) intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); From 8b750bf744181ca3eadfb288830d2f42b04adc67 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:35 +0200 Subject: [PATCH 147/219] drm/i915/gvt: move the gvt code into kvmgt.ko Instead of having an option to build the gvt code into the main i915 module, just move it into the kvmgt.ko module. This only requires a new struct with three entries that the KVMGT modules needs to register with the main i915 module, and a proper list of GVT-enabled devices instead of global device pointer. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-7-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/Kconfig | 36 ++-- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/gvt/Makefile | 3 +- drivers/gpu/drm/i915/gvt/gvt.c | 55 ++---- drivers/gpu/drm/i915/gvt/gvt.h | 6 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 14 +- drivers/gpu/drm/i915/gvt/mpt.h | 3 - drivers/gpu/drm/i915/i915_driver.c | 7 - drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_gvt.c | 206 +++++++++++++------- drivers/gpu/drm/i915/intel_gvt.h | 17 +- drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 1 + 12 files changed, 195 insertions(+), 156 deletions(-) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 98c5450b8eac..be122f541cf9 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -102,40 +102,30 @@ config DRM_I915_USERPTR If in doubt, say "Y". 
config DRM_I915_GVT - bool "Enable Intel GVT-g graphics virtualization host support" + bool + +config DRM_I915_GVT_KVMGT + tristate "Enable KVM host support Intel GVT-g graphics virtualization" depends on DRM_I915 depends on X86 depends on 64BIT - default n + depends on KVM + depends on VFIO_MDEV + select DRM_I915_GVT + select KVM_EXTERNAL_WRITE_TRACKING + help Choose this option if you want to enable Intel GVT-g graphics virtualization technology host support with integrated graphics. With GVT-g, it's possible to have one integrated graphics - device shared by multiple VMs under different hypervisors. + device shared by multiple VMs under KVM. - Note that at least one hypervisor like Xen or KVM is required for - this driver to work, and it only supports newer device from - Broadwell+. For further information and setup guide, you can - visit: http://01.org/igvt-g. - - Now it's just a stub to support the modifications of i915 for - GVT device model. It requires at least one MPT modules for Xen/KVM - and other components of GVT device model to work. Use it under - you own risk. + Note that this driver only supports newer device from Broadwell on. + For further information and setup guide, you can visit: + http://01.org/igvt-g. If in doubt, say "N". -config DRM_I915_GVT_KVMGT - tristate "Enable KVM/VFIO support for Intel GVT-g" - depends on DRM_I915_GVT - depends on KVM - depends on VFIO_MDEV - select KVM_EXTERNAL_WRITE_TRACKING - default n - help - Choose this option if you want to enable KVMGT support for - Intel GVT-g. - config DRM_I915_PXP bool "Enable Intel PXP support" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 95a449b3508b..440cb3decd12 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -326,7 +326,7 @@ i915-$(CONFIG_DRM_I915_GVT) += \ include $(src)/gvt/Makefile obj-$(CONFIG_DRM_I915) += i915.o -obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o +obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o # header test diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index 584661047945..ca8894049dca 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -i915-$(CONFIG_DRM_I915_GVT) += \ +kvmgt-$(CONFIG_DRM_I915_GVT) += \ gvt/aperture_gm.o \ gvt/cfg_space.o \ gvt/cmd_parser.o \ @@ -15,6 +15,7 @@ i915-$(CONFIG_DRM_I915_GVT) += \ gvt/gvt.o \ gvt/handlers.o \ gvt/interrupt.o \ + gvt/kvmgt.o \ gvt/mmio.o \ gvt/mmio_context.o \ gvt/opregion.o \ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index b4b13e4a94e3..c3308058f461 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -39,8 +39,6 @@ #include #include -struct intel_gvt_host intel_gvt_host; - static const struct intel_gvt_ops intel_gvt_ops = { .emulate_cfg_read = intel_vgpu_emulate_cfg_read, .emulate_cfg_write = intel_vgpu_emulate_cfg_write, @@ -147,13 +145,14 @@ static int init_service_thread(struct intel_gvt *gvt) * resources owned by a GVT device. 
* */ -void intel_gvt_clean_device(struct drm_i915_private *i915) +static void intel_gvt_clean_device(struct drm_i915_private *i915) { struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); if (drm_WARN_ON(&i915->drm, !gvt)) return; + intel_gvt_hypervisor_host_exit(i915->drm.dev, gvt); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_clean_vgpu_types(gvt); @@ -181,7 +180,7 @@ void intel_gvt_clean_device(struct drm_i915_private *i915) * Zero on success, negative error code if failed. * */ -int intel_gvt_init_device(struct drm_i915_private *i915) +static int intel_gvt_init_device(struct drm_i915_private *i915) { struct intel_gvt *gvt; struct intel_vgpu *vgpu; @@ -253,11 +252,17 @@ int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); + ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt, + &intel_gvt_ops); + if (ret) + goto out_destroy_idle_vgpu; + gvt_dbg_core("gvt device initialization is done\n"); - intel_gvt_host.dev = i915->drm.dev; - intel_gvt_host.initialized = true; return 0; +out_destroy_idle_vgpu: + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_debugfs_clean(gvt); out_clean_types: intel_gvt_clean_vgpu_types(gvt); out_clean_thread: @@ -281,39 +286,17 @@ out_clean_idr: return ret; } -int -intel_gvt_pm_resume(struct intel_gvt *gvt) +static void intel_gvt_pm_resume(struct drm_i915_private *i915) { + struct intel_gvt *gvt = i915->gvt; + intel_gvt_restore_fence(gvt); intel_gvt_restore_mmio(gvt); intel_gvt_restore_ggtt(gvt); - return 0; } -int -intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) -{ - int ret; - void *gvt; - - if (!intel_gvt_host.initialized) - return -ENODEV; - - intel_gvt_host.mpt = m; - gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; - - ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt, - &intel_gvt_ops); - if (ret < 0) - return -ENODEV; - return 0; -} -EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor); - -void -intel_gvt_unregister_hypervisor(void) -{ - void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; - intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt); -} -EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor); +const struct intel_vgpu_ops intel_gvt_vgpu_ops = { + .init_device = intel_gvt_init_device, + .clean_device = intel_gvt_clean_device, + .pm_resume = intel_gvt_pm_resume, +}; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 554baf26cab3..dc36a791467b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -58,8 +58,6 @@ #define GVT_MAX_VGPU 8 struct intel_gvt_host { - struct device *dev; - bool initialized; const struct intel_gvt_mpt *mpt; }; @@ -728,9 +726,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); -int intel_gvt_pm_resume(struct intel_gvt *gvt); - #include "trace.h" #include "mpt.h" +extern const struct intel_vgpu_ops intel_gvt_vgpu_ops; + #endif diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index e3f0c555ed5f..fa8b326eb219 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -49,8 +49,12 @@ #include #include "i915_drv.h" +#include "intel_gvt.h" #include "gvt.h" +MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS(I915_GVT); + static const struct intel_gvt_ops *intel_gvt_ops; /* helper macros copied from vfio-pci */ @@ -2242,16 +2246,18 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .is_valid_gfn = kvmgt_is_valid_gfn, }; 
+struct intel_gvt_host intel_gvt_host = { + .mpt = &kvmgt_mpt, +}; + static int __init kvmgt_init(void) { - if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) - return -ENODEV; - return 0; + return intel_gvt_set_ops(&intel_gvt_vgpu_ops); } static void __exit kvmgt_exit(void) { - intel_gvt_unregister_hypervisor(); + intel_gvt_clear_ops(&intel_gvt_vgpu_ops); } module_init(kvmgt_init); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index e6c5a792a49a..1b5617bb2745 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -394,7 +394,4 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); } -int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *); -void intel_gvt_unregister_hypervisor(void); - #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 62b3f332bbf5..46b7315de696 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -468,11 +468,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) pci_dev_put(dev_priv->bridge_dev); } -static void intel_sanitize_options(struct drm_i915_private *dev_priv) -{ - intel_gvt_sanitize_options(dev_priv); -} - /** * i915_set_dma_info - set all relevant PCI dma info as configured for the * platform @@ -566,8 +561,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) } } - intel_sanitize_options(dev_priv); - /* needs to be done before ggtt probe */ intel_dram_edram_detect(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0830020c11c0..204cf703323e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -434,6 +434,7 @@ struct i915_virtual_gpu { u32 caps; u32 *initial_mmio; u8 *initial_cfg_space; + struct list_head entry; }; struct i915_selftest_stash { diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 65daab2c4d9e..7c03d975069e 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -24,7 +24,10 @@ #include "i915_drv.h" #include "i915_vgpu.h" #include "intel_gvt.h" -#include "gvt/gvt.h" +#include "gem/i915_gem_dmabuf.h" +#include "gt/intel_context.h" +#include "gt/intel_ring.h" +#include "gt/shmem_utils.h" /** * DOC: Intel GVT-g host support @@ -41,6 +44,10 @@ * doc is available on https://01.org/group/2230/documentation-list. */ +static LIST_HEAD(intel_gvt_devices); +static const struct intel_vgpu_ops *intel_gvt_ops; +static DEFINE_MUTEX(intel_gvt_mutex); + static bool is_supported_device(struct drm_i915_private *dev_priv) { if (IS_BROADWELL(dev_priv)) @@ -59,33 +66,6 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return false; } -/** - * intel_gvt_sanitize_options - sanitize GVT related options - * @dev_priv: drm i915 private data - * - * This function is called at the i915 options sanitize stage. - */ -void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) -{ - if (!dev_priv->params.enable_gvt) - return; - - if (intel_vgpu_active(dev_priv)) { - drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n"); - goto bail; - } - - if (!is_supported_device(dev_priv)) { - drm_info(&dev_priv->drm, - "Unsupported device. 
GVT-g is disabled\n"); - goto bail; - } - - return; -bail: - dev_priv->params.enable_gvt = 0; -} - static void free_initial_hw_state(struct drm_i915_private *dev_priv) { struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; @@ -165,6 +145,84 @@ err_mmio: return ret; } +static void intel_gvt_init_device(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->params.enable_gvt) { + drm_dbg(&dev_priv->drm, + "GVT-g is disabled by kernel params\n"); + return; + } + + if (intel_vgpu_active(dev_priv)) { + drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n"); + return; + } + + if (!is_supported_device(dev_priv)) { + drm_info(&dev_priv->drm, + "Unsupported device. GVT-g is disabled\n"); + return; + } + + if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) { + drm_err(&dev_priv->drm, + "Graphics virtualization is not yet supported with GuC submission\n"); + return; + } + + if (save_initial_hw_state(dev_priv)) { + drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n"); + return; + } + + if (intel_gvt_ops->init_device(dev_priv)) + drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); +} + +static void intel_gvt_clean_device(struct drm_i915_private *dev_priv) +{ + if (dev_priv->gvt) + intel_gvt_ops->clean_device(dev_priv); + free_initial_hw_state(dev_priv); +} + +int intel_gvt_set_ops(const struct intel_vgpu_ops *ops) +{ + struct drm_i915_private *dev_priv; + + mutex_lock(&intel_gvt_mutex); + if (intel_gvt_ops) { + mutex_unlock(&intel_gvt_mutex); + return -EINVAL; + } + intel_gvt_ops = ops; + + list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry) + intel_gvt_init_device(dev_priv); + mutex_unlock(&intel_gvt_mutex); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT); + +void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops) +{ + struct drm_i915_private *dev_priv; + + mutex_lock(&intel_gvt_mutex); + if (intel_gvt_ops != ops) { + mutex_unlock(&intel_gvt_mutex); + return; + } + + list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry) + intel_gvt_clean_device(dev_priv); + + intel_gvt_ops = NULL; + mutex_unlock(&intel_gvt_mutex); +} +EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT); + /** * intel_gvt_init - initialize GVT components * @dev_priv: drm i915 private data @@ -177,47 +235,16 @@ err_mmio: */ int intel_gvt_init(struct drm_i915_private *dev_priv) { - int ret; - if (i915_inject_probe_failure(dev_priv)) return -ENODEV; - if (!dev_priv->params.enable_gvt) { - drm_dbg(&dev_priv->drm, - "GVT-g is disabled by kernel params\n"); - return 0; - } - - if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) { - drm_err(&dev_priv->drm, - "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); - return -EIO; - } - - ret = save_initial_hw_state(dev_priv); - if (ret) { - drm_dbg(&dev_priv->drm, "Fail to save initial HW state\n"); - goto err_save_hw_state; - } - - ret = intel_gvt_init_device(dev_priv); - if (ret) { - drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); - goto err_init_device; - } + mutex_lock(&intel_gvt_mutex); + list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices); + if (intel_gvt_ops) + intel_gvt_init_device(dev_priv); + mutex_unlock(&intel_gvt_mutex); return 0; - -err_init_device: - free_initial_hw_state(dev_priv); -err_save_hw_state: - dev_priv->params.enable_gvt = 0; - return 0; -} - -static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) -{ - return dev_priv->gvt; } /** @@ -230,11 +257,10 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) */ void 
intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { - if (!intel_gvt_active(dev_priv)) - return; - + mutex_lock(&intel_gvt_mutex); intel_gvt_clean_device(dev_priv); - free_initial_hw_state(dev_priv); + list_del(&dev_priv->vgpu.entry); + mutex_unlock(&intel_gvt_mutex); } /** @@ -247,6 +273,46 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) */ void intel_gvt_resume(struct drm_i915_private *dev_priv) { - if (intel_gvt_active(dev_priv)) - intel_gvt_pm_resume(dev_priv->gvt); + mutex_lock(&intel_gvt_mutex); + if (dev_priv->gvt) + intel_gvt_ops->pm_resume(dev_priv); + mutex_unlock(&intel_gvt_mutex); } + +/* + * Exported here so that the exports only get created when GVT support is + * actually enabled. + */ +EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT); +EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT); +EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT); diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index 3a2563ad50f8..eb2a2be252ca 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -39,12 +39,19 @@ struct intel_gvt_mmio_table_iter { int intel_gvt_init(struct drm_i915_private *dev_priv); void intel_gvt_driver_remove(struct drm_i915_private *dev_priv); -int intel_gvt_init_device(struct drm_i915_private *dev_priv); -void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); -void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv); void intel_gvt_resume(struct drm_i915_private *dev_priv); int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter); + +struct intel_vgpu_ops { + int (*init_device)(struct drm_i915_private *dev_priv); + void (*clean_device)(struct drm_i915_private *dev_priv); + void (*pm_resume)(struct drm_i915_private *i915); +}; + +int 
intel_gvt_set_ops(const struct intel_vgpu_ops *ops); +void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops); + #else static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { @@ -55,10 +62,6 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { } -static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) -{ -} - static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) { } diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 7f7fa9a6a7be..03a7fcd0f904 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -1288,3 +1288,4 @@ int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) err: return ret; } +EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT); From 675e5c4a33e20cc1924e99cc6f71a42d355c2c31 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:36 +0200 Subject: [PATCH 148/219] drm/i915/gvt: remove intel_gvt_ops Remove these pointless indirect alls by just calling the only instance of each method directly. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-8-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 20 +-------------- drivers/gpu/drm/i915/gvt/gvt.h | 24 ------------------ drivers/gpu/drm/i915/gvt/hypercall.h | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 37 +++++++++++----------------- drivers/gpu/drm/i915/gvt/mpt.h | 5 ++-- 5 files changed, 19 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index c3308058f461..9259f2a17398 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -39,23 +39,6 @@ #include #include -static const struct intel_gvt_ops intel_gvt_ops = { - .emulate_cfg_read = intel_vgpu_emulate_cfg_read, - .emulate_cfg_write = intel_vgpu_emulate_cfg_write, - .emulate_mmio_read = intel_vgpu_emulate_mmio_read, - .emulate_mmio_write = intel_vgpu_emulate_mmio_write, - .vgpu_create = intel_gvt_create_vgpu, - .vgpu_destroy = intel_gvt_destroy_vgpu, - .vgpu_release = intel_gvt_release_vgpu, - .vgpu_reset = intel_gvt_reset_vgpu, - .vgpu_activate = intel_gvt_activate_vgpu, - .vgpu_deactivate = intel_gvt_deactivate_vgpu, - .vgpu_query_plane = intel_vgpu_query_plane, - .vgpu_get_dmabuf = intel_vgpu_get_dmabuf, - .write_protect_handler = intel_vgpu_page_track_handler, - .emulate_hotplug = intel_vgpu_emulate_hotplug, -}; - static void init_device_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; @@ -252,8 +235,7 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); - ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt, - &intel_gvt_ops); + ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt); if (ret) goto out_destroy_idle_vgpu; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index dc36a791467b..97150a1297eb 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -556,30 +556,6 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); -struct intel_gvt_ops { - int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *, - unsigned int); - int 
(*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *, - unsigned int); - int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *, - unsigned int); - int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *, - unsigned int); - struct intel_vgpu *(*vgpu_create)(struct intel_gvt *, - struct intel_vgpu_type *); - void (*vgpu_destroy)(struct intel_vgpu *vgpu); - void (*vgpu_release)(struct intel_vgpu *vgpu); - void (*vgpu_reset)(struct intel_vgpu *); - void (*vgpu_activate)(struct intel_vgpu *); - void (*vgpu_deactivate)(struct intel_vgpu *); - int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *); - int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int); - int (*write_protect_handler)(struct intel_vgpu *, u64, void *, - unsigned int); - void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected); -}; - - enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, GVT_FAILSAFE_INSUFFICIENT_RESOURCE, diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 317983153645..395bce9633fa 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -42,7 +42,7 @@ struct device; * both Xen and KVM by providing dedicated hypervisor-related MPT modules. */ struct intel_gvt_mpt { - int (*host_init)(struct device *dev, void *gvt, const void *ops); + int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(void *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index fa8b326eb219..9ed13c86d476 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -55,8 +55,6 @@ MODULE_IMPORT_NS(DMA_BUF); MODULE_IMPORT_NS(I915_GVT); -static const struct intel_gvt_ops *intel_gvt_ops; - /* helper macros copied from vfio-pci */ #define VFIO_PCI_OFFSET_SHIFT 40 #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) @@ -621,9 +619,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu, gvt_vgpu_err("invalid EDID blob\n"); return -EINVAL; } - intel_gvt_ops->emulate_hotplug(vgpu, true); + intel_vgpu_emulate_hotplug(vgpu, true); } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) - intel_gvt_ops->emulate_hotplug(vgpu, false); + intel_vgpu_emulate_hotplug(vgpu, false); else { gvt_vgpu_err("invalid EDID link state %d\n", regs->link_state); @@ -825,7 +823,7 @@ static int intel_vgpu_create(struct mdev_device *mdev) goto out; } - vgpu = intel_gvt_ops->vgpu_create(gvt, type); + vgpu = intel_gvt_create_vgpu(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { ret = vgpu == NULL ? 
-EFAULT : PTR_ERR(vgpu); gvt_err("failed to create intel vgpu: %d\n", ret); @@ -852,7 +850,7 @@ static int intel_vgpu_remove(struct mdev_device *mdev) if (handle_valid(vgpu->handle)) return -EBUSY; - intel_gvt_ops->vgpu_destroy(vgpu); + intel_gvt_destroy_vgpu(vgpu); return 0; } @@ -955,7 +953,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) if (ret) goto undo_group; - intel_gvt_ops->vgpu_activate(vgpu); + intel_gvt_activate_vgpu(vgpu); atomic_set(&vdev->released, 0); return ret; @@ -1000,7 +998,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) if (atomic_cmpxchg(&vdev->released, 0, 1)) return; - intel_gvt_ops->vgpu_release(vgpu); + intel_gvt_release_vgpu(vgpu); ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, &vdev->iommu_notifier); @@ -1074,10 +1072,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off, int ret; if (is_write) - ret = intel_gvt_ops->emulate_mmio_write(vgpu, + ret = intel_vgpu_emulate_mmio_write(vgpu, bar_start + off, buf, count); else - ret = intel_gvt_ops->emulate_mmio_read(vgpu, + ret = intel_vgpu_emulate_mmio_read(vgpu, bar_start + off, buf, count); return ret; } @@ -1133,10 +1131,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, switch (index) { case VFIO_PCI_CONFIG_REGION_INDEX: if (is_write) - ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos, + ret = intel_vgpu_emulate_cfg_write(vgpu, pos, buf, count); else - ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos, + ret = intel_vgpu_emulate_cfg_read(vgpu, pos, buf, count); break; case VFIO_PCI_BAR0_REGION_INDEX: @@ -1704,7 +1702,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - intel_gvt_ops->vgpu_reset(vgpu); + intel_gvt_reset_vgpu(vgpu); return 0; } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) { struct vfio_device_gfx_plane_info dmabuf; @@ -1717,7 +1715,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, if (dmabuf.argsz < minsz) return -EINVAL; - ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf); + ret = intel_vgpu_query_plane(vgpu, &dmabuf); if (ret != 0) return ret; @@ -1725,14 +1723,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, -EFAULT : 0; } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) { __u32 dmabuf_id; - __s32 dmabuf_fd; if (get_user(dmabuf_id, (__u32 __user *)arg)) return -EFAULT; - - dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id); - return dmabuf_fd; - + return intel_vgpu_get_dmabuf(vgpu, dmabuf_id); } return -ENOTTY; @@ -1783,7 +1777,7 @@ static struct mdev_parent_ops intel_vgpu_mdev_ops = { .ioctl = intel_vgpu_ioctl, }; -static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) +static int kvmgt_host_init(struct device *dev, void *gvt) { int ret; @@ -1791,7 +1785,6 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) if (ret) return ret; - intel_gvt_ops = ops; intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups; ret = mdev_register_device(dev, &intel_vgpu_mdev_ops); @@ -1883,7 +1876,7 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvmgt_guest_info, track_node); if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) - intel_gvt_ops->write_protect_handler(info->vgpu, gpa, + intel_vgpu_page_track_handler(info->vgpu, gpa, (void *)val, len); } diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 1b5617bb2745..0e3966e1fec8 100644 --- 
a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -51,13 +51,12 @@ * Returns: * Zero on success, negative error code if failed */ -static inline int intel_gvt_hypervisor_host_init(struct device *dev, - void *gvt, const void *ops) +static inline int intel_gvt_hypervisor_host_init(struct device *dev, void *gvt) { if (!intel_gvt_host.mpt->host_init) return -ENODEV; - return intel_gvt_host.mpt->host_init(dev, gvt, ops); + return intel_gvt_host.mpt->host_init(dev, gvt); } /** From c977092a9977083f1e73306658182be5123116e3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:37 +0200 Subject: [PATCH 149/219] drm/i915/gvt: remove the map_gfn_to_mfn and set_trap_area ops The map_gfn_to_mfn and set_trap_area ops are never defined, so remove them and clean up code that depends on them in the callers. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-9-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cfg_space.c | 89 ++++++---------------------- drivers/gpu/drm/i915/gvt/hypercall.h | 4 -- drivers/gpu/drm/i915/gvt/mpt.h | 44 -------------- 3 files changed, 17 insertions(+), 120 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index b490e3db2e38..dad3a6054335 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int map_aperture(struct intel_vgpu *vgpu, bool map) +static void map_aperture(struct intel_vgpu *vgpu, bool map) { - phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu); - unsigned long aperture_sz = vgpu_aperture_sz(vgpu); - u64 first_gfn; - u64 val; - int ret; - - if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) - return 0; - - val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2]; - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); - else - val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); - - first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT; - - ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn, - aperture_pa >> PAGE_SHIFT, - aperture_sz >> PAGE_SHIFT, - map); - if (ret) - return ret; - - vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; - return 0; + if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; } -static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap) +static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap) { - u64 start, end; - u64 val; - int ret; - - if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked) - return 0; - - val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0]; - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); - else - start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); - - start &= ~GENMASK(3, 0); - end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; - - ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap); - if (ret) - return ret; - - vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap; - return 0; + if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked) + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap; } static int emulate_pci_command_write(struct intel_vgpu 
*vgpu, @@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, u8 old = vgpu_cfg_space(vgpu)[offset]; u8 new = *(u8 *)p_data; u8 changed = old ^ new; - int ret; vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); if (!(changed & PCI_COMMAND_MEMORY)) return 0; if (old & PCI_COMMAND_MEMORY) { - ret = trap_gttmmio(vgpu, false); - if (ret) - return ret; - ret = map_aperture(vgpu, false); - if (ret) - return ret; + trap_gttmmio(vgpu, false); + map_aperture(vgpu, false); } else { - ret = trap_gttmmio(vgpu, true); - if (ret) - return ret; - ret = map_aperture(vgpu, true); - if (ret) - return ret; + trap_gttmmio(vgpu, true); + map_aperture(vgpu, true); } return 0; @@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu, return 0; } -static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, +static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 new = *(u32 *)(p_data); bool lo = IS_ALIGNED(offset, 8); u64 size; - int ret = 0; bool mmio_enabled = vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; @@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, * Untrap the BAR, since guest hasn't configured a * valid GPA */ - ret = trap_gttmmio(vgpu, false); + trap_gttmmio(vgpu, false); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); intel_vgpu_write_pci_bar(vgpu, offset, size >> (lo ? 0 : 32), lo); - ret = map_aperture(vgpu, false); + map_aperture(vgpu, false); break; default: /* Unimplemented BARs */ @@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, */ trap_gttmmio(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - ret = trap_gttmmio(vgpu, mmio_enabled); + trap_gttmmio(vgpu, mmio_enabled); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: map_aperture(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - ret = map_aperture(vgpu, mmio_enabled); + map_aperture(vgpu, mmio_enabled); break; default: intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } } - return ret; } /** @@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_5: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; - return emulate_pci_bar_write(vgpu, offset, p_data, bytes); - + emulate_pci_bar_write(vgpu, offset, p_data, bytes); + break; case INTEL_GVT_PCI_SWSCI: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 395bce9633fa..f1a4926f6f1b 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -62,10 +62,6 @@ struct intel_gvt_mpt { int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); - int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, - unsigned long mfn, unsigned int nr, bool map); - int (*set_trap_area)(unsigned long handle, u64 start, u64 end, - bool map); int (*set_opregion)(void *vgpu); int (*set_edid)(void *vgpu, int port_num); int (*get_vfio_device)(void *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 0e3966e1fec8..bb0e9e71d13e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -270,50 +270,6 @@ intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); } -/** - * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN - * @vgpu: a vGPU - * @gfn: guest PFN - * @mfn: host PFN - * @nr: amount of PFNs - * @map: map or unmap - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_map_gfn_to_mfn( - struct intel_vgpu *vgpu, unsigned long gfn, - unsigned long mfn, unsigned int nr, - bool map) -{ - /* a MPT implementation could have MMIO mapped elsewhere */ - if (!intel_gvt_host.mpt->map_gfn_to_mfn) - return 0; - - return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr, - map); -} - -/** - * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region - * @vgpu: a vGPU - * @start: the beginning of the guest physical address region - * @end: the end of the guest physical address region - * @map: map or unmap - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_set_trap_area( - struct intel_vgpu *vgpu, u64 start, u64 end, bool map) -{ - /* a MPT implementation could have MMIO trapped elsewhere */ - if (!intel_gvt_host.mpt->set_trap_area) - return 0; - - return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); -} - /** * intel_gvt_hypervisor_set_opregion - Set opregion for guest * @vgpu: a vGPU From 3cbac24c2cdbfe7174427933a41a1027015d2644 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:38 +0200 Subject: [PATCH 150/219] drm/i915/gvt: remove the unused from_virt_to_mfn op Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-10-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ------ drivers/gpu/drm/i915/gvt/mpt.h | 12 ------------ 3 files changed, 19 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index f1a4926f6f1b..27890a5e2d82 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -47,7 +47,6 @@ struct intel_gvt_mpt { int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(void *vgpu); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); - unsigned long (*from_virt_to_mfn)(void *p); int (*enable_page_track)(unsigned long handle, u64 gfn); int (*disable_page_track)(unsigned long handle, u64 gfn); int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 9ed13c86d476..f7b8ce87bc33 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -2192,11 +2192,6 @@ static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, return kvmgt_rw_gpa(handle, gpa, buf, len, true); } -static unsigned long kvmgt_virt_to_pfn(void *addr) -{ - return PFN_DOWN(__pa(addr)); -} - static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) { struct kvmgt_guest_info *info; @@ -2223,7 +2218,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .attach_vgpu = kvmgt_attach_vgpu, .detach_vgpu = kvmgt_detach_vgpu, .inject_msi = kvmgt_inject_msi, - .from_virt_to_mfn = kvmgt_virt_to_pfn, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, .read_gpa = kvmgt_read_gpa, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index bb0e9e71d13e..6d062cf71de9 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -140,18 +140,6 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) return 0; } -/** - * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN - * @p: host kernel virtual address - * - * Returns: - * MFN on success, INTEL_GVT_INVALID_ADDR if failed. - */ -static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p) -{ - return intel_gvt_host.mpt->from_virt_to_mfn(p); -} - /** * intel_gvt_hypervisor_enable_page_track - track a guest page * @vgpu: a vGPU From 62980cacc37f58bd054de012d08052dfc4f5fa48 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:39 +0200 Subject: [PATCH 151/219] drm/i915/gvt: merge struct kvmgt_vdev into struct intel_vgpu Move towards having only a single structure for the per-VGPU state. 
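In other words, the VFIO/KVM bookkeeping that used to live in a separately allocated struct kvmgt_vdev, reached through the opaque vgpu->vdev pointer and the kvmgt_vdev() helper, now sits directly in struct intel_vgpu. The stand-alone C sketch below illustrates only the shape of that change; the struct and field names are simplified stand-ins, not the kernel's real definitions.

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative stand-ins only (not the kernel's real types): before the
 * change, per-vGPU VFIO state lived in a separately allocated side
 * structure that the main structure reached through an untyped pointer.
 */
struct side_state {                     /* plays the role of kvmgt_vdev */
        unsigned long nr_cache_entries;
};

struct vgpu_before {
        void *vdev;                     /* opaque pointer to side_state */
};

/*
 * After the change the same field is embedded directly in the per-vGPU
 * structure: one allocation, no cast, no back-pointer.
 */
struct vgpu_after {
        unsigned long nr_cache_entries;
};

int main(void)
{
        struct vgpu_before before = { .vdev = calloc(1, sizeof(struct side_state)) };
        struct vgpu_after after = { .nr_cache_entries = 0 };

        if (!before.vdev)
                return 1;

        /* old style: every access casts the opaque pointer first */
        ((struct side_state *)before.vdev)->nr_cache_entries++;

        /* new style: plain member access on the single structure */
        after.nr_cache_entries++;

        printf("before=%lu after=%lu\n",
               ((struct side_state *)before.vdev)->nr_cache_entries,
               after.nr_cache_entries);

        free(before.vdev);
        return 0;
}

The diff below applies the same idea throughout kvmgt.c: kvmgt_vdev(vgpu) lookups become direct vgpu-> member accesses, the back-pointer from the side structure to the vGPU goes away, and the separate kzalloc()/kfree() of the side structure in the attach/detach paths is no longer needed.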
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-11-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 31 ++- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 288 ++++++++++----------------- drivers/gpu/drm/i915/gvt/mpt.h | 16 -- drivers/gpu/drm/i915/gvt/vgpu.c | 8 +- 5 files changed, 128 insertions(+), 216 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 97150a1297eb..628dd686c03d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -207,21 +207,36 @@ struct intel_vgpu { struct dentry *debugfs; - /* Hypervisor-specific device state. */ - void *vdev; - struct list_head dmabuf_obj_list_head; struct mutex dmabuf_lock; struct idr object_idr; struct intel_vgpu_vblank_timer vblank_timer; u32 scan_nonprivbb; -}; -static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu) -{ - return vgpu->vdev; -} + struct mdev_device *mdev; + struct vfio_region *region; + int num_regions; + struct eventfd_ctx *intx_trigger; + struct eventfd_ctx *msi_trigger; + + /* + * Two caches are used to avoid mapping duplicated pages (eg. + * scratch pages). This help to reduce dma setup overhead. + */ + struct rb_root gfn_cache; + struct rb_root dma_addr_cache; + unsigned long nr_cache_entries; + struct mutex cache_lock; + + struct notifier_block iommu_notifier; + struct notifier_block group_notifier; + struct kvm *kvm; + struct work_struct release_work; + atomic_t released; + struct vfio_device *vfio_device; + struct vfio_group *vfio_group; +}; /* validating GM healthy status*/ #define vgpu_is_vm_unhealthy(ret_val) \ diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 27890a5e2d82..eacee6f41f9c 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -44,7 +44,6 @@ struct device; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(void *vgpu); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); int (*enable_page_track)(unsigned long handle, u64 gfn); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f7b8ce87bc33..1c2b949d8e01 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -112,37 +112,6 @@ struct gvt_dma { struct kref ref; }; -struct kvmgt_vdev { - struct intel_vgpu *vgpu; - struct mdev_device *mdev; - struct vfio_region *region; - int num_regions; - struct eventfd_ctx *intx_trigger; - struct eventfd_ctx *msi_trigger; - - /* - * Two caches are used to avoid mapping duplicated pages (eg. - * scratch pages). This help to reduce dma setup overhead. 
- */ - struct rb_root gfn_cache; - struct rb_root dma_addr_cache; - unsigned long nr_cache_entries; - struct mutex cache_lock; - - struct notifier_block iommu_notifier; - struct notifier_block group_notifier; - struct kvm *kvm; - struct work_struct release_work; - atomic_t released; - struct vfio_device *vfio_device; - struct vfio_group *vfio_group; -}; - -static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu) -{ - return intel_vgpu_vdev(vgpu); -} - static inline bool handle_valid(unsigned long handle) { return !!(handle & ~0xff); @@ -269,7 +238,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); int total_pages; int npage; int ret; @@ -279,7 +247,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, for (npage = 0; npage < total_pages; npage++) { unsigned long cur_gfn = gfn + npage; - ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1); + ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1); drm_WARN_ON(&i915->drm, ret != 1); } } @@ -288,7 +256,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, struct page **page) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long base_pfn = 0; int total_pages; int npage; @@ -303,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long cur_gfn = gfn + npage; unsigned long pfn; - ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1, + ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); if (ret != 1) { gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", @@ -370,7 +337,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node; + struct rb_node *node = vgpu->dma_addr_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -388,7 +355,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) { - struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node; + struct rb_node *node = vgpu->gfn_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -409,7 +376,6 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, { struct gvt_dma *new, *itr; struct rb_node **link, *parent = NULL; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); if (!new) @@ -422,7 +388,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kref_init(&new->ref); /* gfn_cache maps gfn to struct gvt_dma. */ - link = &vdev->gfn_cache.rb_node; + link = &vgpu->gfn_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, gfn_node); @@ -433,11 +399,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->gfn_node, parent, link); - rb_insert_color(&new->gfn_node, &vdev->gfn_cache); + rb_insert_color(&new->gfn_node, &vgpu->gfn_cache); /* dma_addr_cache maps dma addr to struct gvt_dma. 
*/ parent = NULL; - link = &vdev->dma_addr_cache.rb_node; + link = &vgpu->dma_addr_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, dma_addr_node); @@ -448,51 +414,46 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->dma_addr_node, parent, link); - rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache); + rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache); - vdev->nr_cache_entries++; + vgpu->nr_cache_entries++; return 0; } static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, struct gvt_dma *entry) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - - rb_erase(&entry->gfn_node, &vdev->gfn_cache); - rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache); + rb_erase(&entry->gfn_node, &vgpu->gfn_cache); + rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache); kfree(entry); - vdev->nr_cache_entries--; + vgpu->nr_cache_entries--; } static void gvt_cache_destroy(struct intel_vgpu *vgpu) { struct gvt_dma *dma; struct rb_node *node = NULL; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); for (;;) { - mutex_lock(&vdev->cache_lock); - node = rb_first(&vdev->gfn_cache); + mutex_lock(&vgpu->cache_lock); + node = rb_first(&vgpu->gfn_cache); if (!node) { - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); break; } dma = rb_entry(node, struct gvt_dma, gfn_node); gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); __gvt_cache_remove_entry(vgpu, dma); - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } } static void gvt_cache_init(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - - vdev->gfn_cache = RB_ROOT; - vdev->dma_addr_cache = RB_ROOT; - vdev->nr_cache_entries = 0; - mutex_init(&vdev->cache_lock); + vgpu->gfn_cache = RB_ROOT; + vgpu->dma_addr_cache = RB_ROOT; + vgpu->nr_cache_entries = 0; + mutex_init(&vgpu->cache_lock); } static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) @@ -566,18 +527,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool iswrite) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; - void *base = vdev->region[i].data; + void *base = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - if (pos >= vdev->region[i].size || iswrite) { + if (pos >= vgpu->region[i].size || iswrite) { gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n"); return -EINVAL; } - count = min(count, (size_t)(vdev->region[i].size - pos)); + count = min(count, (size_t)(vgpu->region[i].size - pos)); memcpy(buf, base + pos, count); return count; @@ -670,8 +630,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf, int ret; unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; - struct vfio_edid_region *region = - (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data; + struct vfio_edid_region *region = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; if (pos < region->vfio_edid_regs.edid_offset) { @@ -703,34 +662,32 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, const struct intel_vgpu_regops *ops, size_t size, u32 flags, void *data) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct vfio_region *region; - region = krealloc(vdev->region, - (vdev->num_regions + 1) * sizeof(*region), + region = 
krealloc(vgpu->region, + (vgpu->num_regions + 1) * sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; - vdev->region = region; - vdev->region[vdev->num_regions].type = type; - vdev->region[vdev->num_regions].subtype = subtype; - vdev->region[vdev->num_regions].ops = ops; - vdev->region[vdev->num_regions].size = size; - vdev->region[vdev->num_regions].flags = flags; - vdev->region[vdev->num_regions].data = data; - vdev->num_regions++; + vgpu->region = region; + vgpu->region[vgpu->num_regions].type = type; + vgpu->region[vgpu->num_regions].subtype = subtype; + vgpu->region[vgpu->num_regions].ops = ops; + vgpu->region[vgpu->num_regions].size = size; + vgpu->region[vgpu->num_regions].flags = flags; + vgpu->region[vgpu->num_regions].data = data; + vgpu->num_regions++; return 0; } static int kvmgt_get_vfio_device(void *p_vgpu) { struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - vdev->vfio_device = vfio_device_get_from_dev( - mdev_dev(vdev->mdev)); - if (!vdev->vfio_device) { + vgpu->vfio_device = vfio_device_get_from_dev( + mdev_dev(vgpu->mdev)); + if (!vgpu->vfio_device) { gvt_vgpu_err("failed to get vfio device\n"); return -ENODEV; } @@ -796,14 +753,14 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num) return ret; } -static void kvmgt_put_vfio_device(void *vgpu) +static void kvmgt_put_vfio_device(void *data) { - struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu); + struct intel_vgpu *vgpu = data; - if (WARN_ON(!vdev->vfio_device)) + if (WARN_ON(!vgpu->vfio_device)) return; - vfio_device_put(vdev->vfio_device); + vfio_device_put(vgpu->vfio_device); } static int intel_vgpu_create(struct mdev_device *mdev) @@ -830,9 +787,9 @@ static int intel_vgpu_create(struct mdev_device *mdev) goto out; } - INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work); + INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); - kvmgt_vdev(vgpu)->mdev = mdev; + vgpu->mdev = mdev; mdev_set_drvdata(mdev, vgpu); gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", @@ -857,10 +814,8 @@ static int intel_vgpu_remove(struct mdev_device *mdev) static int intel_vgpu_iommu_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct kvmgt_vdev *vdev = container_of(nb, - struct kvmgt_vdev, - iommu_notifier); - struct intel_vgpu *vgpu = vdev->vgpu; + struct intel_vgpu *vgpu = + container_of(nb, struct intel_vgpu, iommu_notifier); if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { struct vfio_iommu_type1_dma_unmap *unmap = data; @@ -870,7 +825,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, iov_pfn = unmap->iova >> PAGE_SHIFT; end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE; - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); for (; iov_pfn < end_iov_pfn; iov_pfn++) { entry = __gvt_cache_find_gfn(vgpu, iov_pfn); if (!entry) @@ -880,7 +835,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, entry->size); __gvt_cache_remove_entry(vgpu, entry); } - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } return NOTIFY_OK; @@ -889,16 +844,15 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, static int intel_vgpu_group_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct kvmgt_vdev *vdev = container_of(nb, - struct kvmgt_vdev, - group_notifier); + struct intel_vgpu *vgpu = + container_of(nb, struct intel_vgpu, group_notifier); /* the only action we care about */ if (action == VFIO_GROUP_NOTIFY_SET_KVM) { - vdev->kvm 
= data; + vgpu->kvm = data; if (!data) - schedule_work(&vdev->release_work); + schedule_work(&vgpu->release_work); } return NOTIFY_OK; @@ -907,17 +861,16 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb, static int intel_vgpu_open_device(struct mdev_device *mdev) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long events; int ret; struct vfio_group *vfio_group; - vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; - vdev->group_notifier.notifier_call = intel_vgpu_group_notifier; + vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; + vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, - &vdev->iommu_notifier); + &vgpu->iommu_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", ret); @@ -926,7 +879,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) events = VFIO_GROUP_NOTIFY_SET_KVM; ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, - &vdev->group_notifier); + &vgpu->group_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", ret); @@ -939,7 +892,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); goto undo_register; } - vdev->vfio_group = vfio_group; + vgpu->vfio_group = vfio_group; /* Take a module reference as mdev core doesn't take * a reference for vendor driver. @@ -955,39 +908,37 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) intel_gvt_activate_vgpu(vgpu); - atomic_set(&vdev->released, 0); + atomic_set(&vgpu->released, 0); return ret; undo_group: - vfio_group_put_external_user(vdev->vfio_group); - vdev->vfio_group = NULL; + vfio_group_put_external_user(vgpu->vfio_group); + vgpu->vfio_group = NULL; undo_register: vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, - &vdev->group_notifier); + &vgpu->group_notifier); undo_iommu: vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, - &vdev->iommu_notifier); + &vgpu->iommu_notifier); out: return ret; } static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct eventfd_ctx *trigger; - trigger = vdev->msi_trigger; + trigger = vgpu->msi_trigger; if (trigger) { eventfd_ctx_put(trigger); - vdev->msi_trigger = NULL; + vgpu->msi_trigger = NULL; } } static void __intel_vgpu_release(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct kvmgt_guest_info *info; int ret; @@ -995,18 +946,18 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) if (!handle_valid(vgpu->handle)) return; - if (atomic_cmpxchg(&vdev->released, 0, 1)) + if (atomic_cmpxchg(&vgpu->released, 0, 1)) return; intel_gvt_release_vgpu(vgpu); - ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, - &vdev->iommu_notifier); + ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_IOMMU_NOTIFY, + &vgpu->iommu_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); - ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY, - &vdev->group_notifier); + ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_GROUP_NOTIFY, + &vgpu->group_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for group failed: %d\n", ret); @@ 
-1017,9 +968,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) kvmgt_guest_exit(info); intel_vgpu_release_msi_eventfd_ctx(vgpu); - vfio_group_put_external_user(vdev->vfio_group); + vfio_group_put_external_user(vgpu->vfio_group); - vdev->kvm = NULL; + vgpu->kvm = NULL; vgpu->handle = 0; } @@ -1032,10 +983,10 @@ static void intel_vgpu_close_device(struct mdev_device *mdev) static void intel_vgpu_release_work(struct work_struct *work) { - struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev, - release_work); + struct intel_vgpu *vgpu = + container_of(work, struct intel_vgpu, release_work); - __intel_vgpu_release(vdev->vgpu); + __intel_vgpu_release(vgpu); } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) @@ -1117,13 +1068,12 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, size_t count, loff_t *ppos, bool is_write) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; - if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) { + if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) { gvt_vgpu_err("invalid index: %u\n", index); return -EINVAL; } @@ -1152,11 +1102,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, case VFIO_PCI_ROM_REGION_INDEX: break; default: - if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) return -EINVAL; index -= VFIO_PCI_NUM_REGIONS; - return vdev->region[index].ops->rw(vgpu, buf, count, + return vgpu->region[index].ops->rw(vgpu, buf, count, ppos, is_write); } @@ -1409,7 +1359,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, gvt_vgpu_err("eventfd_ctx_fdget failed\n"); return PTR_ERR(trigger); } - kvmgt_vdev(vgpu)->msi_trigger = trigger; + vgpu->msi_trigger = trigger; } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count) intel_vgpu_release_msi_eventfd_ctx(vgpu); @@ -1461,7 +1411,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long minsz; gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); @@ -1480,7 +1429,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, info.flags = VFIO_DEVICE_FLAGS_PCI; info.flags |= VFIO_DEVICE_FLAGS_RESET; info.num_regions = VFIO_PCI_NUM_REGIONS + - vdev->num_regions; + vgpu->num_regions; info.num_irqs = VFIO_PCI_NUM_IRQS; return copy_to_user((void __user *)arg, &info, minsz) ? 
@@ -1571,22 +1520,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, .header.version = 1 }; if (info.index >= VFIO_PCI_NUM_REGIONS + - vdev->num_regions) + vgpu->num_regions) return -EINVAL; info.index = array_index_nospec(info.index, VFIO_PCI_NUM_REGIONS + - vdev->num_regions); + vgpu->num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = vdev->region[i].size; - info.flags = vdev->region[i].flags; + info.size = vgpu->region[i].size; + info.flags = vgpu->region[i].flags; - cap_type.type = vdev->region[i].type; - cap_type.subtype = vdev->region[i].subtype; + cap_type.type = vgpu->region[i].type; + cap_type.subtype = vgpu->region[i].subtype; ret = vfio_info_add_capability(&caps, &cap_type.header, @@ -1928,15 +1877,13 @@ static int kvmgt_guest_init(struct mdev_device *mdev) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct kvm *kvm; vgpu = mdev_get_drvdata(mdev); if (handle_valid(vgpu->handle)) return -EEXIST; - vdev = kvmgt_vdev(vgpu); - kvm = vdev->kvm; + kvm = vgpu->kvm; if (!kvm || kvm->mm != current->mm) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return -ESRCH; @@ -1962,7 +1909,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) kvm_page_track_register_notifier(kvm, &info->track_node); debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, - &vdev->nr_cache_entries); + &vgpu->nr_cache_entries); return 0; } @@ -1980,52 +1927,33 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) return true; } -static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle) -{ - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - - vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL); - - if (!vgpu->vdev) - return -ENOMEM; - - kvmgt_vdev(vgpu)->vgpu = vgpu; - - return 0; -} - static void kvmgt_detach_vgpu(void *p_vgpu) { int i; struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - if (!vdev->region) + if (!vgpu->region) return; - for (i = 0; i < vdev->num_regions; i++) - if (vdev->region[i].ops->release) - vdev->region[i].ops->release(vgpu, - &vdev->region[i]); - vdev->num_regions = 0; - kfree(vdev->region); - vdev->region = NULL; - - kfree(vdev); + for (i = 0; i < vgpu->num_regions; i++) + if (vgpu->region[i].ops->release) + vgpu->region[i].ops->release(vgpu, + &vgpu->region[i]); + vgpu->num_regions = 0; + kfree(vgpu->region); + vgpu->region = NULL; } static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; if (!handle_valid(handle)) return -ESRCH; info = (struct kvmgt_guest_info *)handle; vgpu = info->vgpu; - vdev = kvmgt_vdev(vgpu); /* * When guest is poweroff, msi_trigger is set to NULL, but vgpu's @@ -2036,10 +1964,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) * enabled by guest. so if msi_trigger is null, success is still * returned and don't inject interrupt into guest. 
*/ - if (vdev->msi_trigger == NULL) + if (vgpu->msi_trigger == NULL) return 0; - if (eventfd_signal(vdev->msi_trigger, 1) == 1) + if (eventfd_signal(vgpu->msi_trigger, 1) == 1) return 0; return -EFAULT; @@ -2066,7 +1994,6 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret; @@ -2074,9 +2001,8 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, return -EINVAL; vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; - vdev = kvmgt_vdev(vgpu); - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_gfn(vgpu, gfn); if (!entry) { @@ -2104,20 +2030,19 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, *dma_addr = entry->dma_addr; } - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); return 0; err_unmap: gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); err_unlock: - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); return ret; } static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) { struct kvmgt_guest_info *info; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret = 0; @@ -2125,15 +2050,14 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) return -ENODEV; info = (struct kvmgt_guest_info *)handle; - vdev = kvmgt_vdev(info->vgpu); - mutex_lock(&vdev->cache_lock); + mutex_lock(&info->vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); if (entry) kref_get(&entry->ref); else ret = -ENOMEM; - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&info->vgpu->cache_lock); return ret; } @@ -2150,20 +2074,18 @@ static void __gvt_dma_release(struct kref *ref) static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) { struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; if (!handle_valid(handle)) return; vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; - vdev = kvmgt_vdev(vgpu); - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_put(&entry->ref, __gvt_dma_release); - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, @@ -2176,8 +2098,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, info = (struct kvmgt_guest_info *)handle; - return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group, - gpa, buf, len, write); + return vfio_dma_rw(info->vgpu->vfio_group, gpa, buf, len, write); } static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, @@ -2215,7 +2136,6 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .attach_vgpu = kvmgt_attach_vgpu, .detach_vgpu = kvmgt_detach_vgpu, .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 6d062cf71de9..8a659301d78b 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,22 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU - * related stuffs inside hypervisor. 
- * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->attach_vgpu) - return 0; - - return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle); -} - /** * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU * related stuffs inside hypervisor. diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 8dddd0a940a1..fd335a7bcb84 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -405,13 +405,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, populate_pvinfo_page(vgpu); - ret = intel_gvt_hypervisor_attach_vgpu(vgpu); - if (ret) - goto out_clean_vgpu_resource; - ret = intel_vgpu_init_gtt(vgpu); if (ret) - goto out_detach_hypervisor_vgpu; + goto out_clean_vgpu_resource; ret = intel_vgpu_init_opregion(vgpu); if (ret) @@ -454,8 +450,6 @@ out_clean_opregion: intel_vgpu_clean_opregion(vgpu); out_clean_gtt: intel_vgpu_clean_gtt(vgpu); -out_detach_hypervisor_vgpu: - intel_gvt_hypervisor_detach_vgpu(vgpu); out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: From 10ddb96295f3bdc6caf4518b8001725440d7a7d2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:40 +0200 Subject: [PATCH 152/219] drm/i915/gvt: merge struct kvmgt_guest_info into strut intel_vgpu Consolidate the per-VGPU structures into a single one. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-12-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 8 +++ drivers/gpu/drm/i915/gvt/kvmgt.c | 117 ++++++++++++------------------- 2 files changed, 52 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 628dd686c03d..16daa615f9c0 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -34,6 +34,7 @@ #define _GVT_H_ #include +#include #include "i915_drv.h" #include "intel_gvt.h" @@ -174,6 +175,8 @@ struct intel_vgpu_submission { } last_ctx[I915_NUM_ENGINES]; }; +#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" + struct intel_vgpu { struct intel_gvt *gvt; struct mutex vgpu_lock; @@ -236,6 +239,11 @@ struct intel_vgpu { atomic_t released; struct vfio_device *vfio_device; struct vfio_group *vfio_group; + + struct kvm_page_track_notifier_node track_node; +#define NR_BKT (1 << 18) + struct hlist_head ptable[NR_BKT]; +#undef NR_BKT }; /* validating GM healthy status*/ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1c2b949d8e01..37cdf092a714 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -92,16 +91,6 @@ struct kvmgt_pgfn { struct hlist_node hnode; }; -#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" -struct kvmgt_guest_info { - struct kvm *kvm; - struct intel_vgpu *vgpu; - struct kvm_page_track_notifier_node track_node; -#define NR_BKT (1 << 18) - struct hlist_head ptable[NR_BKT]; -#undef NR_BKT -}; - struct gvt_dma { struct intel_vgpu *vgpu; struct rb_node gfn_node; @@ -232,7 +221,7 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) static int kvmgt_guest_init(struct mdev_device *mdev); static void intel_vgpu_release_work(struct work_struct 
*work); -static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); +static bool kvmgt_guest_exit(struct intel_vgpu *info); static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) @@ -456,12 +445,12 @@ static void gvt_cache_init(struct intel_vgpu *vgpu) mutex_init(&vgpu->cache_lock); } -static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) +static void kvmgt_protect_table_init(struct intel_vgpu *info) { hash_init(info->ptable); } -static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info) +static void kvmgt_protect_table_destroy(struct intel_vgpu *info) { struct kvmgt_pgfn *p; struct hlist_node *tmp; @@ -474,7 +463,7 @@ static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info) } static struct kvmgt_pgfn * -__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) +__kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p, *res = NULL; @@ -488,8 +477,7 @@ __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) return res; } -static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, - gfn_t gfn) +static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -497,7 +485,7 @@ static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, return !!p; } -static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) +static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -512,8 +500,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) hash_add(info->ptable, &p->hnode, gfn); } -static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, - gfn_t gfn) +static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -940,7 +927,6 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) static void __intel_vgpu_release(struct intel_vgpu *vgpu) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct kvmgt_guest_info *info; int ret; if (!handle_valid(vgpu->handle)) @@ -964,8 +950,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) /* dereference module reference taken at open */ module_put(THIS_MODULE); - info = (struct kvmgt_guest_info *)vgpu->handle; - kvmgt_guest_exit(info); + kvmgt_guest_exit(vgpu); intel_vgpu_release_msi_eventfd_ctx(vgpu); vfio_group_put_external_user(vgpu->vfio_group); @@ -1751,7 +1736,7 @@ static void kvmgt_host_exit(struct device *dev, void *gvt) static int kvmgt_page_track_add(unsigned long handle, u64 gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *info; struct kvm *kvm; struct kvm_memory_slot *slot; int idx; @@ -1759,7 +1744,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) if (!handle_valid(handle)) return -ESRCH; - info = (struct kvmgt_guest_info *)handle; + info = (struct intel_vgpu *)handle; kvm = info->kvm; idx = srcu_read_lock(&kvm->srcu); @@ -1785,7 +1770,7 @@ out: static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *info; struct kvm *kvm; struct kvm_memory_slot *slot; int idx; @@ -1793,7 +1778,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) if (!handle_valid(handle)) return 0; - info = (struct kvmgt_guest_info *)handle; + info = (struct intel_vgpu *)handle; kvm = info->kvm; idx = srcu_read_lock(&kvm->srcu); @@ -1821,11 +1806,11 @@ static void kvmgt_page_track_write(struct kvm_vcpu 
*vcpu, gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node) { - struct kvmgt_guest_info *info = container_of(node, - struct kvmgt_guest_info, track_node); + struct intel_vgpu *info = + container_of(node, struct intel_vgpu, track_node); if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) - intel_vgpu_page_track_handler(info->vgpu, gpa, + intel_vgpu_page_track_handler(info, gpa, (void *)val, len); } @@ -1835,8 +1820,8 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, { int i; gfn_t gfn; - struct kvmgt_guest_info *info = container_of(node, - struct kvmgt_guest_info, track_node); + struct intel_vgpu *info = + container_of(node, struct intel_vgpu, track_node); write_lock(&kvm->mmu_lock); for (i = 0; i < slot->npages; i++) { @@ -1853,7 +1838,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) { struct intel_vgpu *itr; - struct kvmgt_guest_info *info; int id; bool ret = false; @@ -1862,8 +1846,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) if (!handle_valid(itr->handle)) continue; - info = (struct kvmgt_guest_info *)itr->handle; - if (kvm && kvm == info->kvm) { + if (kvm && kvm == itr->kvm) { ret = true; goto out; } @@ -1875,7 +1858,6 @@ out: static int kvmgt_guest_init(struct mdev_device *mdev) { - struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; struct kvm *kvm; @@ -1892,38 +1874,29 @@ static int kvmgt_guest_init(struct mdev_device *mdev) if (__kvmgt_vgpu_exist(vgpu, kvm)) return -EEXIST; - info = vzalloc(sizeof(struct kvmgt_guest_info)); - if (!info) - return -ENOMEM; + vgpu->handle = (unsigned long)vgpu; + kvm_get_kvm(vgpu->kvm); - vgpu->handle = (unsigned long)info; - info->vgpu = vgpu; - info->kvm = kvm; - kvm_get_kvm(info->kvm); - - kvmgt_protect_table_init(info); + kvmgt_protect_table_init(vgpu); gvt_cache_init(vgpu); - info->track_node.track_write = kvmgt_page_track_write; - info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; - kvm_page_track_register_notifier(kvm, &info->track_node); + vgpu->track_node.track_write = kvmgt_page_track_write; + vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_page_track_register_notifier(kvm, &vgpu->track_node); debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, &vgpu->nr_cache_entries); return 0; } -static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) +static bool kvmgt_guest_exit(struct intel_vgpu *info) { - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, - info->vgpu->debugfs)); + debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, info->debugfs)); kvm_page_track_unregister_notifier(info->kvm, &info->track_node); kvm_put_kvm(info->kvm); kvmgt_protect_table_destroy(info); - gvt_cache_destroy(info->vgpu); - vfree(info); - + gvt_cache_destroy(info); return true; } @@ -1946,14 +1919,12 @@ static void kvmgt_detach_vgpu(void *p_vgpu) static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) { - struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; if (!handle_valid(handle)) return -ESRCH; - info = (struct kvmgt_guest_info *)handle; - vgpu = info->vgpu; + vgpu = (struct intel_vgpu *)handle; /* * When guest is poweroff, msi_trigger is set to NULL, but vgpu's @@ -1975,15 +1946,15 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; kvm_pfn_t pfn; if 
(!handle_valid(handle)) return INTEL_GVT_INVALID_ADDR; - info = (struct kvmgt_guest_info *)handle; + vgpu = (struct intel_vgpu *)handle; - pfn = gfn_to_pfn(info->kvm, gfn); + pfn = gfn_to_pfn(vgpu->kvm, gfn); if (is_error_noslot_pfn(pfn)) return INTEL_GVT_INVALID_ADDR; @@ -2000,7 +1971,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, if (!handle_valid(handle)) return -EINVAL; - vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; + vgpu = (struct intel_vgpu *)handle; mutex_lock(&vgpu->cache_lock); @@ -2042,22 +2013,22 @@ err_unlock: static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; struct gvt_dma *entry; int ret = 0; if (!handle_valid(handle)) return -ENODEV; - info = (struct kvmgt_guest_info *)handle; + vgpu = (struct intel_vgpu *)handle; - mutex_lock(&info->vgpu->cache_lock); - entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); + mutex_lock(&vgpu->cache_lock); + entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_get(&entry->ref); else ret = -ENOMEM; - mutex_unlock(&info->vgpu->cache_lock); + mutex_unlock(&vgpu->cache_lock); return ret; } @@ -2079,7 +2050,7 @@ static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr if (!handle_valid(handle)) return; - vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; + vgpu = (struct intel_vgpu *)handle; mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); @@ -2091,14 +2062,14 @@ static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, void *buf, unsigned long len, bool write) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; if (!handle_valid(handle)) return -ESRCH; - info = (struct kvmgt_guest_info *)handle; + vgpu = (struct intel_vgpu *)handle; - return vfio_dma_rw(info->vgpu->vfio_group, gpa, buf, len, write); + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write); } static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, @@ -2115,7 +2086,7 @@ static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; struct kvm *kvm; int idx; bool ret; @@ -2123,8 +2094,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) if (!handle_valid(handle)) return false; - info = (struct kvmgt_guest_info *)handle; - kvm = info->kvm; + vgpu = (struct intel_vgpu *)handle; + kvm = vgpu->kvm; idx = srcu_read_lock(&kvm->srcu); ret = kvm_is_visible_gfn(kvm, gfn); From 3c340d05868d98bfded92c405363fd63bff3ca62 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:41 +0200 Subject: [PATCH 153/219] drm/i915/gvt: remove vgpu->handle Always pass the actual vgpu structure instead of encoding it as a "handle" and add a bool flag to denote if a VGPU is attached. 
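
The conversion follows one simple pattern: an opaque integer handle that every
callback had to cast and validate becomes a typed pointer plus a boolean
"attached" flag. A minimal, self-contained C sketch of that pattern, using
illustrative names rather than the real GVT structures:

    /* Sketch only: models the handle -> typed-pointer conversion. */
    #include <stdbool.h>
    #include <stdio.h>

    struct vgpu {
        bool attached;          /* replaces the handle_valid(handle) check */
        int id;
    };

    /* Old style: the callback decodes and validates an opaque handle. */
    static int inject_msi_old(unsigned long handle)
    {
        struct vgpu *vgpu = (struct vgpu *)handle;

        if (!handle || !vgpu->attached)
            return -1;
        return vgpu->id;
    }

    /* New style: the callback takes the structure directly. */
    static int inject_msi_new(struct vgpu *vgpu)
    {
        if (!vgpu->attached)
            return -1;
        return vgpu->id;
    }

    int main(void)
    {
        struct vgpu vgpu = { .attached = true, .id = 7 };

        printf("%d %d\n", inject_msi_old((unsigned long)&vgpu),
               inject_msi_new(&vgpu));
        return 0;
    }
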
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-13-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 3 +- drivers/gpu/drm/i915/gvt/hypercall.h | 32 +++---- drivers/gpu/drm/i915/gvt/kvmgt.c | 126 +++++++++------------------ drivers/gpu/drm/i915/gvt/mpt.h | 20 ++--- drivers/gpu/drm/i915/gvt/vgpu.c | 6 +- 5 files changed, 71 insertions(+), 116 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 16daa615f9c0..cda70ea3d174 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -181,8 +181,8 @@ struct intel_vgpu { struct intel_gvt *gvt; struct mutex vgpu_lock; int id; - unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ bool active; + bool attached; bool pv_notified; bool failsafe; unsigned int resetting_eng; @@ -449,7 +449,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define RING_CTX_SIZE 320 struct intel_vgpu_creation_params { - __u64 handle; __u64 low_gm_sz; /* in MB */ __u64 high_gm_sz; /* in MB */ __u64 fence_sz; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index eacee6f41f9c..9f0475759825 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -36,6 +36,7 @@ #include struct device; +struct intel_vgpu; /* * Specific GVT-g MPT modules function collections. Currently GVT-g supports @@ -44,27 +45,28 @@ struct device; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - void (*detach_vgpu)(void *vgpu); - int (*inject_msi)(unsigned long handle, u32 addr, u16 data); - int (*enable_page_track)(unsigned long handle, u64 gfn); - int (*disable_page_track)(unsigned long handle, u64 gfn); - int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf, + void (*detach_vgpu)(struct intel_vgpu *vgpu); + int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); + int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); + int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); + int (*read_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len); - int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf, + int (*write_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len); - unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn); + unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); - int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn, + int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); - void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); + void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu, + dma_addr_t dma_addr); - int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); - int (*set_opregion)(void *vgpu); - int (*set_edid)(void *vgpu, int port_num); - int (*get_vfio_device)(void *vgpu); - void (*put_vfio_device)(void *vgpu); - bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn); + int (*set_opregion)(struct intel_vgpu *vgpu); + int (*set_edid)(struct intel_vgpu *vgpu, int port_num); + int (*get_vfio_device)(struct intel_vgpu *vgpu); + void (*put_vfio_device)(struct intel_vgpu *vgpu); + bool (*is_valid_gfn)(struct 
intel_vgpu *vgpu, unsigned long gfn); }; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 37cdf092a714..ed010ab13310 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -101,11 +101,6 @@ struct gvt_dma { struct kref ref; }; -static inline bool handle_valid(unsigned long handle) -{ - return !!(handle & ~0xff); -} - static ssize_t available_instances_show(struct mdev_type *mtype, struct mdev_type_attribute *attr, char *buf) @@ -668,10 +663,8 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, return 0; } -static int kvmgt_get_vfio_device(void *p_vgpu) +static int kvmgt_get_vfio_device(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - vgpu->vfio_device = vfio_device_get_from_dev( mdev_dev(vgpu->mdev)); if (!vgpu->vfio_device) { @@ -682,9 +675,8 @@ static int kvmgt_get_vfio_device(void *p_vgpu) } -static int kvmgt_set_opregion(void *p_vgpu) +static int kvmgt_set_opregion(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; void *base; int ret; @@ -710,9 +702,8 @@ static int kvmgt_set_opregion(void *p_vgpu) return ret; } -static int kvmgt_set_edid(void *p_vgpu, int port_num) +static int kvmgt_set_edid(struct intel_vgpu *vgpu, int port_num) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); struct vfio_edid_region *base; int ret; @@ -740,10 +731,8 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num) return ret; } -static void kvmgt_put_vfio_device(void *data) +static void kvmgt_put_vfio_device(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = data; - if (WARN_ON(!vgpu->vfio_device)) return; @@ -791,7 +780,7 @@ static int intel_vgpu_remove(struct mdev_device *mdev) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - if (handle_valid(vgpu->handle)) + if (vgpu->attached) return -EBUSY; intel_gvt_destroy_vgpu(vgpu); @@ -929,7 +918,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) struct drm_i915_private *i915 = vgpu->gvt->gt->i915; int ret; - if (!handle_valid(vgpu->handle)) + if (!vgpu->attached) return; if (atomic_cmpxchg(&vgpu->released, 0, 1)) @@ -956,7 +945,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) vfio_group_put_external_user(vgpu->vfio_group); vgpu->kvm = NULL; - vgpu->handle = 0; + vgpu->attached = false; } static void intel_vgpu_close_device(struct mdev_device *mdev) @@ -1734,19 +1723,15 @@ static void kvmgt_host_exit(struct device *dev, void *gvt) intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); } -static int kvmgt_page_track_add(unsigned long handle, u64 gfn) +static int kvmgt_page_track_add(struct intel_vgpu *info, u64 gfn) { - struct intel_vgpu *info; - struct kvm *kvm; + struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; int idx; - if (!handle_valid(handle)) + if (!info->attached) return -ESRCH; - info = (struct intel_vgpu *)handle; - kvm = info->kvm; - idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1768,19 +1753,15 @@ out: return 0; } -static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) +static int kvmgt_page_track_remove(struct intel_vgpu *info, u64 gfn) { - struct intel_vgpu *info; - struct kvm *kvm; + struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; int idx; - if (!handle_valid(handle)) + if (!info->attached) return 0; - info = (struct intel_vgpu *)handle; - kvm = info->kvm; - idx = 
srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1843,7 +1824,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) mutex_lock(&vgpu->gvt->lock); for_each_active_vgpu(vgpu->gvt, itr, id) { - if (!handle_valid(itr->handle)) + if (!itr->attached) continue; if (kvm && kvm == itr->kvm) { @@ -1858,14 +1839,12 @@ out: static int kvmgt_guest_init(struct mdev_device *mdev) { - struct intel_vgpu *vgpu; - struct kvm *kvm; + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct kvm *kvm = vgpu->kvm; - vgpu = mdev_get_drvdata(mdev); - if (handle_valid(vgpu->handle)) + if (vgpu->attached) return -EEXIST; - kvm = vgpu->kvm; if (!kvm || kvm->mm != current->mm) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return -ESRCH; @@ -1874,7 +1853,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) if (__kvmgt_vgpu_exist(vgpu, kvm)) return -EEXIST; - vgpu->handle = (unsigned long)vgpu; + vgpu->attached = true; kvm_get_kvm(vgpu->kvm); kvmgt_protect_table_init(vgpu); @@ -1900,10 +1879,9 @@ static bool kvmgt_guest_exit(struct intel_vgpu *info) return true; } -static void kvmgt_detach_vgpu(void *p_vgpu) +static void kvmgt_detach_vgpu(struct intel_vgpu *vgpu) { int i; - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; if (!vgpu->region) return; @@ -1917,15 +1895,11 @@ static void kvmgt_detach_vgpu(void *p_vgpu) vgpu->region = NULL; } -static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) +static int kvmgt_inject_msi(struct intel_vgpu *vgpu, u32 addr, u16 data) { - struct intel_vgpu *vgpu; - - if (!handle_valid(handle)) + if (!vgpu->attached) return -ESRCH; - vgpu = (struct intel_vgpu *)handle; - /* * When guest is poweroff, msi_trigger is set to NULL, but vgpu's * config and mmio register isn't restored to default during guest @@ -1944,16 +1918,14 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) return -EFAULT; } -static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) +static unsigned long kvmgt_gfn_to_pfn(struct intel_vgpu *vgpu, + unsigned long gfn) { - struct intel_vgpu *vgpu; kvm_pfn_t pfn; - if (!handle_valid(handle)) + if (!vgpu->attached) return INTEL_GVT_INVALID_ADDR; - vgpu = (struct intel_vgpu *)handle; - pfn = gfn_to_pfn(vgpu->kvm, gfn); if (is_error_noslot_pfn(pfn)) return INTEL_GVT_INVALID_ADDR; @@ -1961,18 +1933,15 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) return pfn; } -static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, +static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - struct intel_vgpu *vgpu; struct gvt_dma *entry; int ret; - if (!handle_valid(handle)) + if (!vgpu->attached) return -EINVAL; - vgpu = (struct intel_vgpu *)handle; - mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_gfn(vgpu, gfn); @@ -2011,17 +1980,15 @@ err_unlock: return ret; } -static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) +static int kvmgt_dma_pin_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) { - struct intel_vgpu *vgpu; struct gvt_dma *entry; int ret = 0; - if (!handle_valid(handle)) + if (!vgpu->attached) return -ENODEV; - vgpu = (struct intel_vgpu *)handle; - mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) @@ -2042,16 +2009,14 @@ static void __gvt_dma_release(struct kref *ref) __gvt_cache_remove_entry(entry->vgpu, entry); } -static void 
kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) +static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) { - struct intel_vgpu *vgpu; struct gvt_dma *entry; - if (!handle_valid(handle)) + if (!vgpu->attached) return; - vgpu = (struct intel_vgpu *)handle; - mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) @@ -2059,44 +2024,35 @@ static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr mutex_unlock(&vgpu->cache_lock); } -static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, +static int kvmgt_rw_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len, bool write) { - struct intel_vgpu *vgpu; - - if (!handle_valid(handle)) + if (!vgpu->attached) return -ESRCH; - - vgpu = (struct intel_vgpu *)handle; - return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write); } -static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, +static int kvmgt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return kvmgt_rw_gpa(handle, gpa, buf, len, false); + return kvmgt_rw_gpa(vgpu, gpa, buf, len, false); } -static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, +static int kvmgt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return kvmgt_rw_gpa(handle, gpa, buf, len, true); + return kvmgt_rw_gpa(vgpu, gpa, buf, len, true); } -static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) +static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) { - struct intel_vgpu *vgpu; - struct kvm *kvm; + struct kvm *kvm = vgpu->kvm; int idx; bool ret; - if (!handle_valid(handle)) + if (!vgpu->attached) return false; - vgpu = (struct intel_vgpu *)handle; - kvm = vgpu->kvm; - idx = srcu_read_lock(&kvm->srcu); ret = kvm_is_visible_gfn(kvm, gfn); srcu_read_unlock(&kvm->srcu, idx); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 8a659301d78b..ba0c31c4a705 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -118,7 +118,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) trace_inject_msi(vgpu->id, addr, data); - ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data); + ret = intel_gvt_host.mpt->inject_msi(vgpu, addr, data); if (ret) return ret; return 0; @@ -135,7 +135,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) static inline int intel_gvt_hypervisor_enable_page_track( struct intel_vgpu *vgpu, unsigned long gfn) { - return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn); + return intel_gvt_host.mpt->enable_page_track(vgpu, gfn); } /** @@ -149,7 +149,7 @@ static inline int intel_gvt_hypervisor_enable_page_track( static inline int intel_gvt_hypervisor_disable_page_track( struct intel_vgpu *vgpu, unsigned long gfn) { - return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn); + return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); } /** @@ -165,7 +165,7 @@ static inline int intel_gvt_hypervisor_disable_page_track( static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len); + return intel_gvt_host.mpt->read_gpa(vgpu, gpa, buf, len); } /** @@ -181,7 +181,7 @@ static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu, static inline int 
intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len); + return intel_gvt_host.mpt->write_gpa(vgpu, gpa, buf, len); } /** @@ -195,7 +195,7 @@ static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( struct intel_vgpu *vgpu, unsigned long gfn) { - return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn); + return intel_gvt_host.mpt->gfn_to_mfn(vgpu, gfn); } /** @@ -212,7 +212,7 @@ static inline int intel_gvt_hypervisor_dma_map_guest_page( struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size, + return intel_gvt_host.mpt->dma_map_guest_page(vgpu, gfn, size, dma_addr); } @@ -224,7 +224,7 @@ static inline int intel_gvt_hypervisor_dma_map_guest_page( static inline void intel_gvt_hypervisor_dma_unmap_guest_page( struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr); + intel_gvt_host.mpt->dma_unmap_guest_page(vgpu, dma_addr); } /** @@ -239,7 +239,7 @@ static inline int intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); + return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); } /** @@ -318,7 +318,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( if (!intel_gvt_host.mpt->is_valid_gfn) return true; - return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); + return intel_gvt_host.mpt->is_valid_gfn(vgpu, gfn); } #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index fd335a7bcb84..5356aa866968 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu *vgpu; int ret; - gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n", - param->handle, param->low_gm_sz, param->high_gm_sz, + gvt_dbg_core("low %llu MB high %llu MB fence %llu\n", + param->low_gm_sz, param->high_gm_sz, param->fence_sz); vgpu = vzalloc(sizeof(*vgpu)); @@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, goto out_free_vgpu; vgpu->id = ret; - vgpu->handle = param->handle; vgpu->gvt = gvt; vgpu->sched_ctl.weight = param->weight; mutex_init(&vgpu->vgpu_lock); @@ -477,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_creation_params param; struct intel_vgpu *vgpu; - param.handle = 0; param.primary = 1; param.low_gm_sz = type->low_gm_size; param.high_gm_sz = type->high_gm_size; From e3d7640eeeb3066772500581172129a151a1a917 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:42 +0200 Subject: [PATCH 154/219] drm/i915/gvt: devirtualize ->{read,write}_gpa Just call the VFIO functions directly instead of through the method table. 
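
Devirtualizing here removes one level of indirection: instead of reaching the
single KVMGT backend through a function pointer in the MPT table, callers use
the new inline helpers (intel_gvt_read_gpa()/intel_gvt_write_gpa()) that end in
vfio_dma_rw() directly. A minimal, self-contained C sketch of the difference,
with stand-in names and a dummy backend:

    /* Sketch only: a method-table call versus a direct, inlinable call. */
    #include <stdio.h>
    #include <string.h>

    static int backend_read(void *vgpu, unsigned long gpa, void *buf,
                            unsigned long len)
    {
        (void)vgpu; (void)gpa;
        memset(buf, 0, len);    /* stands in for vfio_dma_rw(..., false) */
        return 0;
    }

    /* Old style: every call goes through a function pointer. */
    struct mpt_ops {
        int (*read_gpa)(void *vgpu, unsigned long gpa, void *buf,
                        unsigned long len);
    };

    static const struct mpt_ops ops = { .read_gpa = backend_read };

    /* New style: the single backend is called directly and can be inlined. */
    static inline int read_gpa(void *vgpu, unsigned long gpa, void *buf,
                               unsigned long len)
    {
        return backend_read(vgpu, gpa, buf, len);
    }

    int main(void)
    {
        char buf[8];

        printf("%d %d\n",
               ops.read_gpa(NULL, 0x1000, buf, sizeof(buf)),
               read_gpa(NULL, 0x1000, buf, sizeof(buf)));
        return 0;
    }
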
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-14-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 4 +-- drivers/gpu/drm/i915/gvt/execlist.c | 12 ++++----- drivers/gpu/drm/i915/gvt/gtt.c | 6 ++--- drivers/gpu/drm/i915/gvt/gvt.h | 37 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/hypercall.h | 4 --- drivers/gpu/drm/i915/gvt/kvmgt.c | 23 ----------------- drivers/gpu/drm/i915/gvt/mmio.c | 4 +-- drivers/gpu/drm/i915/gvt/mpt.h | 32 ----------------------- drivers/gpu/drm/i915/gvt/opregion.c | 10 +++----- drivers/gpu/drm/i915/gvt/scheduler.c | 37 +++++++++++++-------------- 10 files changed, 72 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2459213b6c87..b9eb75a2b400 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, if (GRAPHICS_VER(s->engine->i915) == 9 && intel_gvt_mmio_is_sr_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { - intel_gvt_hypervisor_read_gpa(s->vgpu, + intel_gvt_read_gpa(s->vgpu, s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); /* check inhibit context */ if (ctx_sr_ctl & 1) { @@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ? I915_GTT_PAGE_SIZE - offset : end_gma - gma; - intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len); + intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len); len += copy_len; gma += copy_len; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 66d354c4195b..274c6ef42400 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist, hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, vgpu->hws_pga[execlist->engine->id]); if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) { - intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, - status, 8); - intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, - &write_pointer, 4); + intel_gvt_write_gpa(vgpu, + hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, + status, 8); + intel_gvt_write_gpa(vgpu, + hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, + &write_pointer, 4); } gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n", diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d4082f4b9be1..9b696e5705eb 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -314,7 +314,7 @@ static inline int gtt_get_entry64(void *pt, return -EINVAL; if (hypervisor_access) { - ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + + ret = intel_gvt_read_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) @@ -339,7 +339,7 @@ static inline int gtt_set_entry64(void *pt, return -EINVAL; if (hypervisor_access) { - ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa + + ret = intel_gvt_write_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) @@ -1497,7 +1497,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, struct intel_gvt *gvt = spt->vgpu->gvt; int ret; - ret = 
intel_gvt_hypervisor_read_gpa(spt->vgpu, + ret = intel_gvt_read_gpa(spt->vgpu, spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, oos_page->mem, I915_GTT_PAGE_SIZE); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index cda70ea3d174..ea07f4513805 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -35,6 +35,7 @@ #include #include +#include #include "i915_drv.h" #include "intel_gvt.h" @@ -720,6 +721,42 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch( return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH; } +/** + * intel_gvt_read_gpa - copy data from GPA to host data buffer + * @vgpu: a vGPU + * @gpa: guest physical address + * @buf: host data buffer + * @len: data length + * + * Returns: + * Zero on success, negative error code if failed. + */ +static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, + void *buf, unsigned long len) +{ + if (!vgpu->attached) + return -ESRCH; + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false); +} + +/** + * intel_gvt_write_gpa - copy data from host data buffer to GPA + * @vgpu: a vGPU + * @gpa: guest physical address + * @buf: host data buffer + * @len: data length + * + * Returns: + * Zero on success, negative error code if failed. + */ +static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu, + unsigned long gpa, void *buf, unsigned long len) +{ + if (!vgpu->attached) + return -ESRCH; + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true); +} + void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 9f0475759825..61e493e2de85 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -49,10 +49,6 @@ struct intel_gvt_mpt { int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); - int (*read_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, - unsigned long len); - int (*write_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, - unsigned long len); unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index ed010ab13310..e4e3d0cc66fc 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include @@ -2024,26 +2023,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } -static int kvmgt_rw_gpa(struct intel_vgpu *vgpu, unsigned long gpa, - void *buf, unsigned long len, bool write) -{ - if (!vgpu->attached) - return -ESRCH; - return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write); -} - -static int kvmgt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, - void *buf, unsigned long len) -{ - return kvmgt_rw_gpa(vgpu, gpa, buf, len, false); -} - -static int kvmgt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, - void *buf, unsigned long len) -{ - return kvmgt_rw_gpa(vgpu, gpa, buf, len, true); -} - static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) { struct kvm *kvm = vgpu->kvm; @@ -2067,8 +2046,6 @@ static const 
struct intel_gvt_mpt kvmgt_mpt = { .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, - .read_gpa = kvmgt_read_gpa, - .write_gpa = kvmgt_write_gpa, .gfn_to_mfn = kvmgt_gfn_to_pfn, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 5db0ef83d522..9acc00505fde 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { - ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); + ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes); goto out; } @@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { - ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); + ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes); goto out; } diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index ba0c31c4a705..72388ceec596 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -152,38 +152,6 @@ static inline int intel_gvt_hypervisor_disable_page_track( return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); } -/** - * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer - * @vgpu: a vGPU - * @gpa: guest physical address - * @buf: host data buffer - * @len: data length - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu, - unsigned long gpa, void *buf, unsigned long len) -{ - return intel_gvt_host.mpt->read_gpa(vgpu, gpa, buf, len); -} - -/** - * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA - * @vgpu: a vGPU - * @gpa: guest physical address - * @buf: host data buffer - * @len: data length - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, - unsigned long gpa, void *buf, unsigned long len) -{ - return intel_gvt_host.mpt->write_gpa(vgpu, gpa, buf, len); -} - /** * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 286ac6d7c6ce..d2bed466540a 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -421,14 +421,14 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) INTEL_GVT_OPREGION_SCIC; parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + INTEL_GVT_OPREGION_PARM; - ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); + ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); if (ret) { gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } - ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); + ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); if (ret) { gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); @@ -465,16 +465,14 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) parm = 0; out: - ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, &scic, - sizeof(scic)); + ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic)); if (ret) { gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } - ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, &parm, - sizeof(parm)); + ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm)); if (ret) { gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 679476da0640..d6fe94cd0fdb 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sr_oa_regs(workload, (u32 *)shadow_ring_context, true); #define COPY_REG(name) \ - intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) #define COPY_REG_MASKED(name) {\ - intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val),\ &shadow_ring_context->name.val, 4);\ shadow_ring_context->name.val |= 0xffff << 16;\ @@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); } else if (workload->engine->id == BCS0) - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + BCS_TILE_REGISTER_VAL_OFFSET, (void *)shadow_ring_context + @@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) /* don't copy Ring Context (the first 0x50 dwords), * only copy the Engine Context part from guest */ - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + RING_CTX_SIZE, (void *)shadow_ring_context + @@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) continue; read: - intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size); + 
intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; dst = context_base + (i << I915_GTT_PAGE_SHIFT); @@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu, gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) - intel_gvt_hypervisor_write_gpa(vgpu, - gpa + i * 8, &pdp[7 - i], 4); + intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } static __maybe_unused bool @@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload) continue; write: - intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size); + intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; src = context_base + (i << I915_GTT_PAGE_SHIFT); } - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); shadow_ring_context = (void *) ctx->lrc_reg_state; @@ -1028,7 +1027,7 @@ write: } #define COPY_REG(name) \ - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) COPY_REG(ctx_ctrl); @@ -1036,7 +1035,7 @@ write: #undef COPY_REG - intel_gvt_hypervisor_write_gpa(vgpu, + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + sizeof(*shadow_ring_context), (void *)shadow_ring_context + @@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu, gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } @@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, return ERR_PTR(-EINVAL); } - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_header.val), &head, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_tail.val), &tail, 4); guest_head = head; @@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, gvt_dbg_el("ring %s begin a new workload\n", engine->name); /* record some ring buffer register values for scan and shadow */ - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_start.val), &start, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_ctrl.val), &ctl, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); if (!intel_gvt_ggtt_validate_range(vgpu, start, @@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, workload->rb_ctl = ctl; if (engine->id == RCS0) { - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4); workload->wa_ctx.indirect_ctx.guest_gma = From fe902f0ce686e8dbdaea7dd3c40271640857328a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:43 +0200 Subject: [PATCH 155/219] drm/i915/gvt: devirtualize ->{get,put}_vfio_device Just open code the calls to the VFIO 
APIs. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-15-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 12 ++++++----- drivers/gpu/drm/i915/gvt/hypercall.h | 2 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 22 -------------------- drivers/gpu/drm/i915/gvt/mpt.h | 30 ---------------------------- 4 files changed, 7 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index c95c25d2addb..cc1a9ac0d272 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -29,7 +29,7 @@ */ #include -#include +#include #include #include @@ -157,7 +157,7 @@ static void dmabuf_gem_object_free(struct kref *kref) dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); - intel_gvt_hypervisor_put_vfio_device(vgpu); + vfio_device_put(vgpu->vfio_device); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); @@ -492,9 +492,11 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) kref_init(&dmabuf_obj->kref); mutex_lock(&vgpu->dmabuf_lock); - if (intel_gvt_hypervisor_get_vfio_device(vgpu)) { - gvt_vgpu_err("get vfio device failed\n"); + vgpu->vfio_device = vfio_device_get_from_dev(mdev_dev(vgpu->mdev)); + if (!vgpu->vfio_device) { + gvt_vgpu_err("failed to get vfio device\n"); mutex_unlock(&vgpu->dmabuf_lock); + ret = -ENODEV; goto out_free_info; } mutex_unlock(&vgpu->dmabuf_lock); @@ -603,7 +605,7 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); - intel_gvt_hypervisor_put_vfio_device(vgpu); + vfio_device_put(vgpu->vfio_device); list_del(pos); /* dmabuf_obj might be freed in dmabuf_obj_put */ diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 61e493e2de85..fd903d52f431 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -60,8 +60,6 @@ struct intel_gvt_mpt { int (*set_opregion)(struct intel_vgpu *vgpu); int (*set_edid)(struct intel_vgpu *vgpu, int port_num); - int (*get_vfio_device)(struct intel_vgpu *vgpu); - void (*put_vfio_device)(struct intel_vgpu *vgpu); bool (*is_valid_gfn)(struct intel_vgpu *vgpu, unsigned long gfn); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index e4e3d0cc66fc..1f70cbd51a3f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -662,18 +662,6 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, return 0; } -static int kvmgt_get_vfio_device(struct intel_vgpu *vgpu) -{ - vgpu->vfio_device = vfio_device_get_from_dev( - mdev_dev(vgpu->mdev)); - if (!vgpu->vfio_device) { - gvt_vgpu_err("failed to get vfio device\n"); - return -ENODEV; - } - return 0; -} - - static int kvmgt_set_opregion(struct intel_vgpu *vgpu) { void *base; @@ -730,14 +718,6 @@ static int kvmgt_set_edid(struct intel_vgpu *vgpu, int port_num) return ret; } -static void kvmgt_put_vfio_device(struct intel_vgpu *vgpu) -{ - if (WARN_ON(!vgpu->vfio_device)) - return; - - vfio_device_put(vgpu->vfio_device); -} - static int intel_vgpu_create(struct mdev_device *mdev) { struct intel_vgpu *vgpu = NULL; @@ -2052,8 +2032,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .dma_pin_guest_page = kvmgt_dma_pin_guest_page, .set_opregion = kvmgt_set_opregion, .set_edid = kvmgt_set_edid, - 
.get_vfio_device = kvmgt_get_vfio_device, - .put_vfio_device = kvmgt_put_vfio_device, .is_valid_gfn = kvmgt_is_valid_gfn, }; diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 72388ceec596..2196187203af 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -242,36 +242,6 @@ static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->set_edid(vgpu, port_num); } -/** - * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->get_vfio_device) - return 0; - - return intel_gvt_host.mpt->get_vfio_device(vgpu); -} - -/** - * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->put_vfio_device) - return; - - intel_gvt_host.mpt->put_vfio_device(vgpu); -} - /** * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn * @vgpu: a vGPU From f9399b0e4a9555227559f0adaa8e861bedb4b735 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:44 +0200 Subject: [PATCH 156/219] drm/i915/gvt: devirtualize ->set_edid and ->set_opregion Just call the code to setup the opregions and EDID data directly. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-16-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 3 +++ drivers/gpu/drm/i915/gvt/hypercall.h | 3 --- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ++---- drivers/gpu/drm/i915/gvt/mpt.h | 32 ---------------------------- drivers/gpu/drm/i915/gvt/vgpu.c | 6 +++--- 5 files changed, 8 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index ea07f4513805..d389c9c3822b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -517,6 +517,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); +int intel_gvt_set_opregion(struct intel_vgpu *vgpu); +int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num); + /* validating GM functions */ #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \ diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index fd903d52f431..091249a924a8 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -57,9 +57,6 @@ struct intel_gvt_mpt { dma_addr_t dma_addr); int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); - - int (*set_opregion)(struct intel_vgpu *vgpu); - int (*set_edid)(struct intel_vgpu *vgpu, int port_num); bool (*is_valid_gfn)(struct intel_vgpu *vgpu, unsigned long gfn); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1f70cbd51a3f..006db16c7d03 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -662,7 +662,7 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, return 0; } -static int kvmgt_set_opregion(struct intel_vgpu *vgpu) +int 
intel_gvt_set_opregion(struct intel_vgpu *vgpu) { void *base; int ret; @@ -689,7 +689,7 @@ static int kvmgt_set_opregion(struct intel_vgpu *vgpu) return ret; } -static int kvmgt_set_edid(struct intel_vgpu *vgpu, int port_num) +int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); struct vfio_edid_region *base; @@ -2030,8 +2030,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, - .set_opregion = kvmgt_set_opregion, - .set_edid = kvmgt_set_edid, .is_valid_gfn = kvmgt_is_valid_gfn, }; diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 2196187203af..9738aa3377b4 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -210,38 +210,6 @@ intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); } -/** - * intel_gvt_hypervisor_set_opregion - Set opregion for guest - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->set_opregion) - return 0; - - return intel_gvt_host.mpt->set_opregion(vgpu); -} - -/** - * intel_gvt_hypervisor_set_edid - Set EDID region for guest - * @vgpu: a vGPU - * @port_num: display port number - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu, - int port_num) -{ - if (!intel_gvt_host.mpt->set_edid) - return 0; - - return intel_gvt_host.mpt->set_edid(vgpu, port_num); -} - /** * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 5356aa866968..69c1af3d6704 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -426,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, intel_gvt_debugfs_add_vgpu(vgpu); - ret = intel_gvt_hypervisor_set_opregion(vgpu); + ret = intel_gvt_set_opregion(vgpu); if (ret) goto out_clean_sched_policy; if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv)) - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); + ret = intel_gvt_set_edid(vgpu, PORT_B); else - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); + ret = intel_gvt_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; From 4c705ad0d784fd9ae7160d8c4e0a151abe465dbc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:45 +0200 Subject: [PATCH 157/219] drm/i915/gvt: devirtualize ->detach_vgpu Just call the function directly. 
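The conversion follows the shape used across this series: a call that used to go through the optional intel_gvt_mpt method table becomes a direct call into the KVMGT code. As a stand-alone illustration of that pattern (the names example_vgpu, example_ops and example_detach are invented for this sketch and are not part of the driver):

#include <stdio.h>

struct example_vgpu { int id; };

/* Before: an optional callback reached through a method table. */
struct example_ops {
        void (*detach_vgpu)(struct example_vgpu *vgpu);
};

static void example_detach(struct example_vgpu *vgpu)
{
        printf("detaching vGPU %d\n", vgpu->id);
}

static const struct example_ops example_mpt = {
        .detach_vgpu = example_detach,
};

/* Old style: every caller pays a NULL check plus an indirect call. */
static void detach_via_table(struct example_vgpu *vgpu)
{
        if (!example_mpt.detach_vgpu)
                return;
        example_mpt.detach_vgpu(vgpu);
}

int main(void)
{
        struct example_vgpu vgpu = { .id = 1 };

        detach_via_table(&vgpu);        /* old: indirection through the table */
        example_detach(&vgpu);          /* new: direct call, table entry gone */
        return 0;
}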
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-17-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 1 + drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 3 +-- drivers/gpu/drm/i915/gvt/mpt.h | 16 ---------------- drivers/gpu/drm/i915/gvt/vgpu.c | 2 +- 5 files changed, 3 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index d389c9c3822b..52ef88d2bf21 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -581,6 +581,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); +void intel_vgpu_detach_regions(struct intel_vgpu *vgpu); enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 091249a924a8..08c622c4079b 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,7 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - void (*detach_vgpu)(struct intel_vgpu *vgpu); int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 006db16c7d03..4a6fed80c629 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1858,7 +1858,7 @@ static bool kvmgt_guest_exit(struct intel_vgpu *info) return true; } -static void kvmgt_detach_vgpu(struct intel_vgpu *vgpu) +void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) { int i; @@ -2022,7 +2022,6 @@ static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .detach_vgpu = kvmgt_detach_vgpu, .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 9738aa3377b4..78efcf1e6946 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,22 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU - * related stuffs inside hypervisor. - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->detach_vgpu) - return; - - intel_gvt_host.mpt->detach_vgpu(vgpu); -} - #define MSI_CAP_CONTROL(offset) (offset + 2) #define MSI_CAP_ADDRESS(offset) (offset + 4) #define MSI_CAP_DATA(offset) (offset + 8) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 69c1af3d6704..46da19b3225d 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_opregion(vgpu); intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_clean_gtt(vgpu); - intel_gvt_hypervisor_detach_vgpu(vgpu); + intel_vgpu_detach_regions(vgpu); intel_vgpu_free_resource(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); From b3bece34956f86dcc8307f20b41a072ccdc917dc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:46 +0200 Subject: [PATCH 158/219] drm/i915/gvt: devirtualize ->inject_msi Just open code the MSI injection in a single place instead of going through the method table. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-18-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/interrupt.c | 40 +++++++++++++++++++++++++++- drivers/gpu/drm/i915/gvt/kvmgt.c | 24 ----------------- drivers/gpu/drm/i915/gvt/mpt.h | 37 ------------------------- 4 files changed, 39 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 08c622c4079b..de63bd8dd05b 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,7 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 228f623d466d..a6b2021b665f 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -29,6 +29,8 @@ * */ +#include + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq) } /* =======================vEvent injection===================== */ + +#define MSI_CAP_CONTROL(offset) (offset + 2) +#define MSI_CAP_ADDRESS(offset) (offset + 4) +#define MSI_CAP_DATA(offset) (offset + 8) +#define MSI_CAP_EN 0x1 + static int inject_virtual_interrupt(struct intel_vgpu *vgpu) { - return intel_gvt_hypervisor_inject_msi(vgpu); + unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; + u16 control, data; + u32 addr; + + control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset)); + addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset)); + data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset)); + + /* Do not generate MSI if MSIEN is disabled */ + if (!(control & MSI_CAP_EN)) + return 0; + + if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) + return -EINVAL; + + trace_inject_msi(vgpu->id, addr, data); + + /* + * 
When guest is powered off, msi_trigger is set to NULL, but vgpu's + * config and mmio register isn't restored to default during guest + * poweroff. If this vgpu is still used in next vm, this vgpu's pipe + * may be enabled, then once this vgpu is active, it will get inject + * vblank interrupt request. But msi_trigger is null until msi is + * enabled by guest. so if msi_trigger is null, success is still + * returned and don't inject interrupt into guest. + */ + if (!vgpu->attached) + return -ESRCH; + if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1) + return -EFAULT; + return 0; } static void propagate_event(struct intel_gvt_irq *irq, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 4a6fed80c629..1d1c026fd825 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1874,29 +1874,6 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) vgpu->region = NULL; } -static int kvmgt_inject_msi(struct intel_vgpu *vgpu, u32 addr, u16 data) -{ - if (!vgpu->attached) - return -ESRCH; - - /* - * When guest is poweroff, msi_trigger is set to NULL, but vgpu's - * config and mmio register isn't restored to default during guest - * poweroff. If this vgpu is still used in next vm, this vgpu's pipe - * may be enabled, then once this vgpu is active, it will get inject - * vblank interrupt request. But msi_trigger is null until msi is - * enabled by guest. so if msi_trigger is null, success is still - * returned and don't inject interrupt into guest. - */ - if (vgpu->msi_trigger == NULL) - return 0; - - if (eventfd_signal(vgpu->msi_trigger, 1) == 1) - return 0; - - return -EFAULT; -} - static unsigned long kvmgt_gfn_to_pfn(struct intel_vgpu *vgpu, unsigned long gfn) { @@ -2022,7 +1999,6 @@ static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, .gfn_to_mfn = kvmgt_gfn_to_pfn, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 78efcf1e6946..59369e8b3b69 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,43 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -#define MSI_CAP_CONTROL(offset) (offset + 2) -#define MSI_CAP_ADDRESS(offset) (offset + 4) -#define MSI_CAP_DATA(offset) (offset + 8) -#define MSI_CAP_EN 0x1 - -/** - * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) -{ - unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; - u16 control, data; - u32 addr; - int ret; - - control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset)); - addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset)); - data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset)); - - /* Do not generate MSI if MSIEN is disable */ - if (!(control & MSI_CAP_EN)) - return 0; - - if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) - return -EINVAL; - - trace_inject_msi(vgpu->id, addr, data); - - ret = intel_gvt_host.mpt->inject_msi(vgpu, addr, data); - if (ret) - return ret; - return 0; -} - /** * intel_gvt_hypervisor_enable_page_track - track a guest page * @vgpu: a vGPU From bd73b4b193d45074ff48705d21d4fbecc3fcfac8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:47 +0200 Subject: [PATCH 159/219] drm/i915/gvt: devirtualize ->is_valid_gfn Just call the code directly and move towards the callers. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-19-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 20 ++++++++++++++++++-- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 17 ----------------- drivers/gpu/drm/i915/gvt/mpt.h | 17 ----------------- 4 files changed, 18 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 9b696e5705eb..1360412f8ef8 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -49,6 +49,22 @@ static bool enable_out_of_sync = false; static int preallocated_oos_pages = 8192; +static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) +{ + struct kvm *kvm = vgpu->kvm; + int idx; + bool ret; + + if (!vgpu->attached) + return false; + + idx = srcu_read_lock(&kvm->srcu); + ret = kvm_is_visible_gfn(kvm, gfn); + srcu_read_unlock(&kvm->srcu, idx); + + return ret; +} + /* * validate a gm address and related range size, * translate it to host gm address @@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) ppgtt_set_shadow_entry(spt, &se, i); } else { gfn = ops->get_pfn(&ge); - if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { + if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { ops->set_pfn(&se, gvt->gtt.scratch_mfn); ppgtt_set_shadow_entry(spt, &se, i); continue; @@ -2315,7 +2331,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn */ - if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { + if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { ops->set_pfn(&m, gvt->gtt.scratch_mfn); goto out; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index de63bd8dd05b..c1a9eeed0460 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -55,7 +55,6 @@ struct intel_gvt_mpt { dma_addr_t dma_addr); int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); - bool (*is_valid_gfn)(struct intel_vgpu *vgpu, unsigned long gfn); }; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1d1c026fd825..93fd7f997c8a 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ 
b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1980,22 +1980,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } -static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) -{ - struct kvm *kvm = vgpu->kvm; - int idx; - bool ret; - - if (!vgpu->attached) - return false; - - idx = srcu_read_lock(&kvm->srcu); - ret = kvm_is_visible_gfn(kvm, gfn); - srcu_read_unlock(&kvm->srcu, idx); - - return ret; -} - static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, @@ -2005,7 +1989,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, - .is_valid_gfn = kvmgt_is_valid_gfn, }; struct intel_gvt_host intel_gvt_host = { diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 59369e8b3b69..1a796f2181ba 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -157,21 +157,4 @@ intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); } -/** - * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn - * @vgpu: a vGPU - * @gfn: guest PFN - * - * Returns: - * true on valid gfn, false on not. - */ -static inline bool intel_gvt_hypervisor_is_valid_gfn( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - if (!intel_gvt_host.mpt->is_valid_gfn) - return true; - - return intel_gvt_host.mpt->is_valid_gfn(vgpu, gfn); -} - #endif /* _GVT_MPT_H_ */ From 4050dab5981cd48f67d2367fa90ae030bcc8f7dd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:48 +0200 Subject: [PATCH 160/219] drm/i915/gvt: devirtualize ->gfn_to_mfn Just open code it in the only caller. 
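Open-coding the lookup also lets the caller test KVM's native error encoding with is_error_noslot_pfn() instead of comparing against the private INTEL_GVT_INVALID_ADDR sentinel. A stand-alone sketch of that simplification, where lookup_pfn() and is_error_pfn() are invented stand-ins for the real KVM helpers:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_ADDR (~0UL)

/* Invented stand-ins for the real KVM helpers. */
static bool is_error_pfn(unsigned long pfn) { return pfn == 0; }
static unsigned long lookup_pfn(unsigned long gfn) { return gfn + 0x100; }

/* Before: a wrapper translated the native error into a sentinel value. */
static unsigned long gfn_to_mfn(unsigned long gfn)
{
        unsigned long pfn = lookup_pfn(gfn);

        return is_error_pfn(pfn) ? INVALID_ADDR : pfn;
}

int main(void)
{
        unsigned long pfn;

        /* Old caller: compare against the sentinel. */
        if (gfn_to_mfn(42) == INVALID_ADDR)
                return 1;

        /* New caller: use the native error check directly. */
        pfn = lookup_pfn(42);
        if (is_error_pfn(pfn))
                return 1;

        printf("pfn 0x%lx\n", pfn);
        return 0;
}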
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-20-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 9 +++++---- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 16 ---------------- drivers/gpu/drm/i915/gvt/mpt.h | 14 -------------- 4 files changed, 5 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 1360412f8ef8..f6f3b22a70d2 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1178,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - unsigned long pfn; + kvm_pfn_t pfn; if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) return 0; - pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); - if (pfn == INTEL_GVT_INVALID_ADDR) + if (!vgpu->attached) + return -EINVAL; + pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry)); + if (is_error_noslot_pfn(pfn)) return -EINVAL; - return PageTransHuge(pfn_to_page(pfn)); } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index c1a9eeed0460..dbde492cafc8 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -47,7 +47,6 @@ struct intel_gvt_mpt { void (*host_exit)(struct device *dev, void *gvt); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); - unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 93fd7f997c8a..6d4c67270172 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1874,21 +1874,6 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) vgpu->region = NULL; } -static unsigned long kvmgt_gfn_to_pfn(struct intel_vgpu *vgpu, - unsigned long gfn) -{ - kvm_pfn_t pfn; - - if (!vgpu->attached) - return INTEL_GVT_INVALID_ADDR; - - pfn = gfn_to_pfn(vgpu->kvm, gfn); - if (is_error_noslot_pfn(pfn)) - return INTEL_GVT_INVALID_ADDR; - - return pfn; -} - static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { @@ -1985,7 +1970,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .host_exit = kvmgt_host_exit, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, - .gfn_to_mfn = kvmgt_gfn_to_pfn, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 1a796f2181ba..2d4bb6eaa08e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -99,20 +99,6 @@ static inline int intel_gvt_hypervisor_disable_page_track( return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); } -/** - * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN - * @vgpu: a vGPU - * @gpfn: guest pfn - * - * Returns: - * MFN on success, INTEL_GVT_INVALID_ADDR if failed. 
- */ -static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->gfn_to_mfn(vgpu, gfn); -} - /** * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page * @vgpu: a vGPU From 4c2baaaf764bfb6c293c75bc911b9366d35ee085 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:49 +0200 Subject: [PATCH 161/219] drm/i915/gvt: devirtualize ->{enable,disable}_page_track Just call the kvmgt functions directly. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-21-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 3 +++ drivers/gpu/drm/i915/gvt/hypercall.h | 2 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ++---- drivers/gpu/drm/i915/gvt/mpt.h | 28 --------------------------- drivers/gpu/drm/i915/gvt/page_track.c | 8 ++++---- 5 files changed, 9 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 52ef88d2bf21..a8a8728cd54a 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -765,6 +765,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); +int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); +int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); + #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index dbde492cafc8..ded13a63ab66 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,8 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); - int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 6d4c67270172..fbef3d3dfb1f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1702,7 +1702,7 @@ static void kvmgt_host_exit(struct device *dev, void *gvt) intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); } -static int kvmgt_page_track_add(struct intel_vgpu *info, u64 gfn) +int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; @@ -1732,7 +1732,7 @@ out: return 0; } -static int kvmgt_page_track_remove(struct intel_vgpu *info, u64 gfn) +int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) { struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; @@ -1968,8 +1968,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .enable_page_track = kvmgt_page_track_add, - .disable_page_track = kvmgt_page_track_remove, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 2d4bb6eaa08e..d2723ac8bb04 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ 
b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,34 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_enable_page_track - track a guest page - * @vgpu: a vGPU - * @gfn: the gfn of guest - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_enable_page_track( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->enable_page_track(vgpu, gfn); -} - -/** - * intel_gvt_hypervisor_disable_page_track - untrack a guest page - * @vgpu: a vGPU - * @gfn: the gfn of guest - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_disable_page_track( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); -} - /** * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 84856022528e..3375b51c75f1 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, track = radix_tree_delete(&vgpu->page_track_tree, gfn); if (track) { if (track->tracked) - intel_gvt_hypervisor_disable_page_track(vgpu, gfn); + intel_gvt_page_track_remove(vgpu, gfn); kfree(track); } } @@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) if (track->tracked) return 0; - ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn); + ret = intel_gvt_page_track_add(vgpu, gfn); if (ret) return ret; track->tracked = true; @@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) if (!track->tracked) return 0; - ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn); + ret = intel_gvt_page_track_remove(vgpu, gfn); if (ret) return ret; track->tracked = false; @@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, if (unlikely(vgpu->failsafe)) { /* Remove write protection to prevent furture traps. */ - intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT); + intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); } else { ret = page_track->handler(page_track, gpa, data, bytes); if (ret) From 8398eee85fd009bfb2797ea4d0a63b7854d05e46 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:50 +0200 Subject: [PATCH 162/219] drm/i915/gvt: devirtualize ->dma_{,un}map_guest_page Just call the functions directly. Also remove a pointless wrapper. 
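The pointless wrapper mentioned above is the kind of helper that only forwards its arguments, so once the target can be called directly it has no reason to exist. A stand-alone sketch with invented names (unpin_dma_address() forwarding to dma_unmap_guest_page()):

#include <stdio.h>

struct sketch_vgpu { int id; };

static void dma_unmap_guest_page(struct sketch_vgpu *vgpu, unsigned long addr)
{
        printf("vGPU %d: unmap 0x%lx\n", vgpu->id, addr);
}

/* Before: a wrapper that merely forwarded its arguments. */
static void unpin_dma_address(struct sketch_vgpu *vgpu, unsigned long addr)
{
        dma_unmap_guest_page(vgpu, addr);
}

int main(void)
{
        struct sketch_vgpu vgpu = { .id = 3 };

        unpin_dma_address(&vgpu, 0x1000);       /* old call chain */
        dma_unmap_guest_page(&vgpu, 0x2000);    /* new: call the target directly */
        return 0;
}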
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-22-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 10 ++-------- drivers/gpu/drm/i915/gvt/gtt.c | 20 +++++++++---------- drivers/gpu/drm/i915/gvt/gvt.h | 4 ++++ drivers/gpu/drm/i915/gvt/hypercall.h | 5 ----- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ++---- drivers/gpu/drm/i915/gvt/mpt.h | 29 ---------------------------- 6 files changed, 17 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index cc1a9ac0d272..db93b6354327 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -54,12 +54,6 @@ static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, return ret; } -static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) -{ - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr); -} - static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { @@ -114,7 +108,7 @@ out: for_each_sg(st->sgl, sg, i, j) { dma_addr = sg_dma_address(sg); if (dma_addr) - vgpu_unpin_dma_address(vgpu, dma_addr); + intel_gvt_dma_unmap_guest_page(vgpu, dma_addr); } sg_free_table(st); kfree(st); @@ -136,7 +130,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, int i; for_each_sg(pages->sgl, sg, fb_info->size, i) - vgpu_unpin_dma_address(vgpu, + intel_gvt_dma_unmap_guest_page(vgpu, sg_dma_address(sg)); } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index f6f3b22a70d2..9c5cc2800975 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1013,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) return; - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); + intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) @@ -1212,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, return PTR_ERR(sub_spt); for_each_shadow_entry(sub_spt, &sub_se, sub_index) { - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, - start_gfn + sub_index, PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index, + PAGE_SIZE, &dma_addr); if (ret) { ppgtt_invalidate_spt(spt); return ret; @@ -1258,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, ops->set_64k_splited(&entry); for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, - start_gfn + i, PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i, + PAGE_SIZE, &dma_addr); if (ret) return ret; @@ -1313,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, } /* direct shadow */ - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size, - &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr); if (ret) return -ENXIO; @@ -2245,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, pfn = pte_ops->get_pfn(entry); if (pfn != vgpu->gvt->gtt.scratch_mfn) - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, - pfn << PAGE_SHIFT); + intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, @@ -2337,8 +2335,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned 
int off, goto out; } - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, - PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, + &dma_addr); if (ret) { gvt_vgpu_err("fail to populate guest ggtt entry\n"); /* guest driver may read/write the entry when partial diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index a8a8728cd54a..4e42e6ac4077 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -767,6 +767,10 @@ void intel_gvt_debugfs_clean(struct intel_gvt *gvt); int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); +int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, + unsigned long size, dma_addr_t *dma_addr); +void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr); #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index ded13a63ab66..ba03b3368a95 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -46,11 +46,6 @@ struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, - unsigned long size, dma_addr_t *dma_addr); - void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu, - dma_addr_t dma_addr); - int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index fbef3d3dfb1f..a02274036487 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1874,7 +1874,7 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) vgpu->region = NULL; } -static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, +int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { struct gvt_dma *entry; @@ -1950,7 +1950,7 @@ static void __gvt_dma_release(struct kref *ref) __gvt_cache_remove_entry(entry->vgpu, entry); } -static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, +void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { struct gvt_dma *entry; @@ -1968,8 +1968,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .dma_map_guest_page = kvmgt_dma_map_guest_page, - .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, }; diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index d2723ac8bb04..26c1ee690f7e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,35 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page - * @vgpu: a vGPU - * @gfn: guest pfn - * @size: page size - * @dma_addr: retrieve allocated dma addr - * - * Returns: - * 0 on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_dma_map_guest_page( - struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, - dma_addr_t *dma_addr) -{ - return intel_gvt_host.mpt->dma_map_guest_page(vgpu, gfn, size, - dma_addr); -} - -/** - * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page - * @vgpu: a vGPU - * @dma_addr: the mapped dma addr - */ -static inline void intel_gvt_hypervisor_dma_unmap_guest_page( - struct intel_vgpu *vgpu, dma_addr_t dma_addr) -{ - intel_gvt_host.mpt->dma_unmap_guest_page(vgpu, dma_addr); -} - /** * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf * @vgpu: a vGPU From 91879bbaf8890fe3595e1e580354462f80dc93de Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:51 +0200 Subject: [PATCH 163/219] drm/i915/gvt: devirtualize dma_pin_guest_page Just call the function directly and remove a pointless wrapper. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-23-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 14 +------------- drivers/gpu/drm/i915/gvt/gvt.h | 1 + drivers/gpu/drm/i915/gvt/hypercall.h | 2 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 4 +--- drivers/gpu/drm/i915/gvt/mpt.h | 15 --------------- 5 files changed, 3 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index db93b6354327..90443306a9ad 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -42,18 +42,6 @@ #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) -static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, - unsigned long size, - dma_addr_t dma_addr) -{ - int ret = 0; - - if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr)) - ret = -EINVAL; - - return ret; -} - static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { @@ -89,7 +77,7 @@ static int vgpu_gem_get_pages( for_each_sg(st->sgl, sg, page_num, i) { dma_addr_t dma_addr = GEN8_DECODE_PTE(readq(>t_entries[i])); - if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) { + if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) { ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 4e42e6ac4077..938f57271737 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -767,6 +767,7 @@ void intel_gvt_debugfs_clean(struct intel_gvt *gvt); int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); +int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index ba03b3368a95..d49437aeabac 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,8 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - - int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); }; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index a02274036487..606b2cb923d8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ 
b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1921,8 +1921,7 @@ err_unlock: return ret; } -static int kvmgt_dma_pin_guest_page(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) +int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { struct gvt_dma *entry; int ret = 0; @@ -1968,7 +1967,6 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .dma_pin_guest_page = kvmgt_dma_pin_guest_page, }; struct intel_gvt_host intel_gvt_host = { diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 26c1ee690f7e..3be602a3f764 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,19 +71,4 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf - * @vgpu: a vGPU - * @dma_addr: guest dma addr - * - * Returns: - * 0 on success, negative error code if failed. - */ -static inline int -intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) -{ - return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); -} - #endif /* _GVT_MPT_H_ */ From 5f8f3fe67cd90807f01ebac744c7e6148a8f6cb7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:52 +0200 Subject: [PATCH 164/219] drm/i915/gvt: remove struct intel_gvt_mpt Just call the initializion and exit functions directly and remove this abstraction entirely. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-24-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 11 ++++- drivers/gpu/drm/i915/gvt/gvt.h | 12 ++--- drivers/gpu/drm/i915/gvt/hypercall.h | 50 ------------------- drivers/gpu/drm/i915/gvt/kvmgt.c | 39 ++------------- drivers/gpu/drm/i915/gvt/mpt.h | 74 ---------------------------- 5 files changed, 17 insertions(+), 169 deletions(-) delete mode 100644 drivers/gpu/drm/i915/gvt/hypercall.h delete mode 100644 drivers/gpu/drm/i915/gvt/mpt.h diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 9259f2a17398..047fb6c41788 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -135,7 +135,8 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915) if (drm_WARN_ON(&i915->drm, !gvt)) return; - intel_gvt_hypervisor_host_exit(i915->drm.dev, gvt); + mdev_unregister_device(i915->drm.dev); + intel_gvt_cleanup_vgpu_type_groups(gvt); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_clean_vgpu_types(gvt); @@ -235,13 +236,19 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); - ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt); + ret = intel_gvt_init_vgpu_type_groups(gvt); if (ret) goto out_destroy_idle_vgpu; + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); + if (ret) + goto out_cleanup_vgpu_type_groups; + gvt_dbg_core("gvt device initialization is done\n"); return 0; +out_cleanup_vgpu_type_groups: + intel_gvt_cleanup_vgpu_type_groups(gvt); out_destroy_idle_vgpu: intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_debugfs_clean(gvt); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 938f57271737..865997c5005d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ 
-41,7 +41,6 @@ #include "intel_gvt.h" #include "debug.h" -#include "hypercall.h" #include "mmio.h" #include "reg.h" #include "interrupt.h" @@ -59,12 +58,6 @@ #define GVT_MAX_VGPU 8 -struct intel_gvt_host { - const struct intel_gvt_mpt *mpt; -}; - -extern struct intel_gvt_host intel_gvt_host; - /* Describe per-platform limitations. */ struct intel_gvt_device_info { u32 max_support_vgpus; @@ -773,9 +766,12 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); +int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt); +void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt); + #include "trace.h" -#include "mpt.h" extern const struct intel_vgpu_ops intel_gvt_vgpu_ops; +extern const struct mdev_parent_ops intel_vgpu_mdev_ops; #endif diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h deleted file mode 100644 index d49437aeabac..000000000000 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * Authors: - * Eddie Dong - * Dexuan Cui - * Jike Song - * - * Contributors: - * Zhi Wang - * - */ - -#ifndef _GVT_HYPERCALL_H_ -#define _GVT_HYPERCALL_H_ - -#include - -struct device; -struct intel_vgpu; - -/* - * Specific GVT-g MPT modules function collections. Currently GVT-g supports - * both Xen and KVM by providing dedicated hypervisor-related MPT modules. - */ -struct intel_gvt_mpt { - int (*host_init)(struct device *dev, void *gvt); - void (*host_exit)(struct device *dev, void *gvt); -}; - -#endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 606b2cb923d8..f908867223ae 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -172,7 +172,7 @@ static struct attribute_group *gvt_vgpu_type_groups[] = { [0 ... 
NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, }; -static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) +int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) { int i, j; struct intel_vgpu_type *type; @@ -201,7 +201,7 @@ unwind: return -ENOMEM; } -static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) +void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) { int i; struct attribute_group *group; @@ -1665,8 +1665,9 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; -static struct mdev_parent_ops intel_vgpu_mdev_ops = { +const struct mdev_parent_ops intel_vgpu_mdev_ops = { .mdev_attr_groups = intel_vgpu_groups, + .supported_type_groups = gvt_vgpu_type_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1679,29 +1680,6 @@ static struct mdev_parent_ops intel_vgpu_mdev_ops = { .ioctl = intel_vgpu_ioctl, }; -static int kvmgt_host_init(struct device *dev, void *gvt) -{ - int ret; - - ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt); - if (ret) - return ret; - - intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups; - - ret = mdev_register_device(dev, &intel_vgpu_mdev_ops); - if (ret) - intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); - - return ret; -} - -static void kvmgt_host_exit(struct device *dev, void *gvt) -{ - mdev_unregister_device(dev); - intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); -} - int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { struct kvm *kvm = info->kvm; @@ -1964,15 +1942,6 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } -static const struct intel_gvt_mpt kvmgt_mpt = { - .host_init = kvmgt_host_init, - .host_exit = kvmgt_host_exit, -}; - -struct intel_gvt_host intel_gvt_host = { - .mpt = &kvmgt_mpt, -}; - static int __init kvmgt_init(void) { return intel_gvt_set_ops(&intel_gvt_vgpu_ops); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h deleted file mode 100644 index 3be602a3f764..000000000000 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * Authors: - * Eddie Dong - * Dexuan Cui - * Jike Song - * - * Contributors: - * Zhi Wang - * - */ - -#ifndef _GVT_MPT_H_ -#define _GVT_MPT_H_ - -#include "gvt.h" - -/** - * DOC: Hypervisor Service APIs for GVT-g Core Logic - * - * This is the glue layer between specific hypervisor MPT modules and GVT-g core - * logic. Each kind of hypervisor MPT module provides a collection of function - * callbacks and will be attached to GVT host when the driver is loading. - * GVT-g core logic will call these APIs to request specific services from - * hypervisor. - */ - -/** - * intel_gvt_hypervisor_host_init - init GVT-g host side - * - * Returns: - * Zero on success, negative error code if failed - */ -static inline int intel_gvt_hypervisor_host_init(struct device *dev, void *gvt) -{ - if (!intel_gvt_host.mpt->host_init) - return -ENODEV; - - return intel_gvt_host.mpt->host_init(dev, gvt); -} - -/** - * intel_gvt_hypervisor_host_exit - exit GVT-g host side - */ -static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->host_exit) - return; - - intel_gvt_host.mpt->host_exit(dev, gvt); -} - -#endif /* _GVT_MPT_H_ */ From 37e4bdbd5bad711c7db5458041416f3925d7aae5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:53 +0200 Subject: [PATCH 165/219] drm/i915/gvt: remove the extra vfio_device refcounting for dmabufs All the dmabufs are torn down when the VGPU is released, so there is no need for extra refcounting here. Based on a patch from Jason Gunthorpe. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-25-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 12 ------------ drivers/gpu/drm/i915/gvt/gvt.h | 1 - 2 files changed, 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 90443306a9ad..01e54b45c5c1 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -139,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref) dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); - vfio_device_put(vgpu->vfio_device); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); @@ -473,16 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) kref_init(&dmabuf_obj->kref); - mutex_lock(&vgpu->dmabuf_lock); - vgpu->vfio_device = vfio_device_get_from_dev(mdev_dev(vgpu->mdev)); - if (!vgpu->vfio_device) { - gvt_vgpu_err("failed to get vfio device\n"); - mutex_unlock(&vgpu->dmabuf_lock); - ret = -ENODEV; - goto out_free_info; - } - mutex_unlock(&vgpu->dmabuf_lock); - update_fb_info(gfx_plane_info, &fb_info); INIT_LIST_HEAD(&dmabuf_obj->list); @@ -587,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); - vfio_device_put(vgpu->vfio_device); list_del(pos); /* dmabuf_obj might be freed in dmabuf_obj_put */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 865997c5005d..8565189e0c0d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -231,7 +231,6 @@ struct intel_vgpu { struct kvm *kvm; struct work_struct release_work; atomic_t released; - struct vfio_device *vfio_device; struct vfio_group *vfio_group; struct kvm_page_track_notifier_node
track_node; From 4456641232e2c1b1eb7d179449c5800b3ce9e9c1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:54 +0200 Subject: [PATCH 166/219] drm/i915/gvt: streamline intel_vgpu_create Initialize variables at declaration time, avoid pointless gotos and cater for the fact that intel_gvt_create_vgpu can't return NULL. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-26-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f908867223ae..11bce3a91a22 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -720,26 +720,19 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) static int intel_vgpu_create(struct mdev_device *mdev) { - struct intel_vgpu *vgpu = NULL; + struct device *pdev = mdev_parent_dev(mdev); + struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; struct intel_vgpu_type *type; - struct device *pdev; - struct intel_gvt *gvt; - int ret; - - pdev = mdev_parent_dev(mdev); - gvt = kdev_to_i915(pdev)->gvt; + struct intel_vgpu *vgpu; type = &gvt->types[mdev_get_type_group_id(mdev)]; - if (!type) { - ret = -EINVAL; - goto out; - } + if (!type) + return -EINVAL; vgpu = intel_gvt_create_vgpu(gvt, type); - if (IS_ERR_OR_NULL(vgpu)) { - ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); - gvt_err("failed to create intel vgpu: %d\n", ret); - goto out; + if (IS_ERR(vgpu)) { + gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); + return PTR_ERR(vgpu); } INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); @@ -749,10 +742,7 @@ static int intel_vgpu_create(struct mdev_device *mdev) gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); - ret = 0; - -out: - return ret; + return 0; } static int intel_vgpu_remove(struct mdev_device *mdev) From 7f11e6893ff01b63820a368851ca389293603dbe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:55 +0200 Subject: [PATCH 167/219] drm/i915/gvt: pass a struct intel_vgpu to the vfio read/write helpers Pass the structure we actually care about instead of deriving it from the mdev_device in the lower level code. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-27-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 11bce3a91a22..5db74b6fe513 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1007,10 +1007,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, return 0; } -static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, +static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool is_write) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; @@ -1056,9 +1055,8 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, return ret == 0 ? 
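The lower-level helpers now receive the object they actually operate on, and only the vfio entry points still derive it from the mdev_device. A stand-alone sketch of hoisting that lookup to the entry point, using invented types sketch_mdev and sketch_vgpu rather than the real ones:

#include <stdio.h>

struct sketch_vgpu { int id; };
struct sketch_mdev { void *drvdata; };

static struct sketch_vgpu *sketch_get_drvdata(struct sketch_mdev *mdev)
{
        return mdev->drvdata;
}

/* Before: every helper took the mdev and re-derived the vgpu itself. */
static void vgpu_rw_old(struct sketch_mdev *mdev, int is_write)
{
        struct sketch_vgpu *vgpu = sketch_get_drvdata(mdev);

        printf("vGPU %d: %s\n", vgpu->id, is_write ? "write" : "read");
}

/* After: the helper receives the structure it actually cares about. */
static void vgpu_rw_new(struct sketch_vgpu *vgpu, int is_write)
{
        printf("vGPU %d: %s\n", vgpu->id, is_write ? "write" : "read");
}

int main(void)
{
        struct sketch_vgpu vgpu = { .id = 5 };
        struct sketch_mdev mdev = { .drvdata = &vgpu };

        vgpu_rw_old(&mdev, 0);
        vgpu_rw_new(sketch_get_drvdata(&mdev), 1);      /* lookup done once, at the entry point */
        return 0;
}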
count : ret; } -static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) +static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); struct intel_gvt *gvt = vgpu->gvt; int offset; @@ -1078,6 +1076,7 @@ static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos) { + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int done = 0; int ret; @@ -1086,10 +1085,10 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, /* Only support GGTT entry 8 bytes read */ if (count >= 8 && !(*ppos % 8) && - gtt_entry(mdev, ppos)) { + gtt_entry(vgpu, ppos)) { u64 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1101,7 +1100,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else if (count >= 4 && !(*ppos % 4)) { u32 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1113,7 +1112,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else if (count >= 2 && !(*ppos % 2)) { u16 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1125,7 +1124,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else { u8 val; - ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos, + ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1152,6 +1151,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, const char __user *buf, size_t count, loff_t *ppos) { + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int done = 0; int ret; @@ -1160,13 +1160,13 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, /* Only support GGTT entry 8 bytes write */ if (count >= 8 && !(*ppos % 8) && - gtt_entry(mdev, ppos)) { + gtt_entry(vgpu, ppos)) { u64 val; if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1178,7 +1178,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1190,7 +1190,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1202,7 +1202,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, &val, sizeof(val), + ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; From 0e09f4066ad11bcb4fa1b2d1a6f07fc0dc9788f9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:56 +0200 Subject: [PATCH 168/219] drm/i915/gvt: remove kvmgt_guest_{init,exit} Merge these into their only 
callers. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-28-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 129 ++++++++++++++----------------- 1 file changed, 60 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 5db74b6fe513..38629ab4c9bf 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -100,6 +100,13 @@ struct gvt_dma { struct kref ref; }; +static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *val, int len, + struct kvm_page_track_notifier_node *node); +static void kvmgt_page_track_flush_slot(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_page_track_notifier_node *node); + static ssize_t available_instances_show(struct mdev_type *mtype, struct mdev_type_attribute *attr, char *buf) @@ -213,9 +220,7 @@ void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) } } -static int kvmgt_guest_init(struct mdev_device *mdev); static void intel_vgpu_release_work(struct work_struct *work); -static bool kvmgt_guest_exit(struct intel_vgpu *info); static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) @@ -803,6 +808,27 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) +{ + struct intel_vgpu *itr; + int id; + bool ret = false; + + mutex_lock(&vgpu->gvt->lock); + for_each_active_vgpu(vgpu->gvt, itr, id) { + if (!itr->attached) + continue; + + if (vgpu->kvm == itr->kvm) { + ret = true; + goto out; + } + } +out: + mutex_unlock(&vgpu->gvt->lock); + return ret; +} + static int intel_vgpu_open_device(struct mdev_device *mdev) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); @@ -847,14 +873,37 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) goto undo_group; } - ret = kvmgt_guest_init(mdev); - if (ret) + ret = -EEXIST; + if (vgpu->attached) goto undo_group; + ret = -ESRCH; + if (!vgpu->kvm || vgpu->kvm->mm != current->mm) { + gvt_vgpu_err("KVM is required to use Intel vGPU\n"); + goto undo_group; + } + + ret = -EEXIST; + if (__kvmgt_vgpu_exist(vgpu)) + goto undo_group; + + vgpu->attached = true; + kvm_get_kvm(vgpu->kvm); + + kvmgt_protect_table_init(vgpu); + gvt_cache_init(vgpu); + + vgpu->track_node.track_write = kvmgt_page_track_write; + vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node); + + debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, + &vgpu->nr_cache_entries); + intel_gvt_activate_vgpu(vgpu); atomic_set(&vgpu->released, 0); - return ret; + return 0; undo_group: vfio_group_put_external_user(vgpu->vfio_group); @@ -908,7 +957,12 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) /* dereference module reference taken at open */ module_put(THIS_MODULE); - kvmgt_guest_exit(vgpu); + debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs)); + + kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node); + kvm_put_kvm(vgpu->kvm); + kvmgt_protect_table_destroy(vgpu); + gvt_cache_destroy(vgpu); intel_vgpu_release_msi_eventfd_ctx(vgpu); vfio_group_put_external_user(vgpu->vfio_group); @@ -1763,69 +1817,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, write_unlock(&kvm->mmu_lock); } -static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct 
kvm *kvm) -{ - struct intel_vgpu *itr; - int id; - bool ret = false; - - mutex_lock(&vgpu->gvt->lock); - for_each_active_vgpu(vgpu->gvt, itr, id) { - if (!itr->attached) - continue; - - if (kvm && kvm == itr->kvm) { - ret = true; - goto out; - } - } -out: - mutex_unlock(&vgpu->gvt->lock); - return ret; -} - -static int kvmgt_guest_init(struct mdev_device *mdev) -{ - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvm *kvm = vgpu->kvm; - - if (vgpu->attached) - return -EEXIST; - - if (!kvm || kvm->mm != current->mm) { - gvt_vgpu_err("KVM is required to use Intel vGPU\n"); - return -ESRCH; - } - - if (__kvmgt_vgpu_exist(vgpu, kvm)) - return -EEXIST; - - vgpu->attached = true; - kvm_get_kvm(vgpu->kvm); - - kvmgt_protect_table_init(vgpu); - gvt_cache_init(vgpu); - - vgpu->track_node.track_write = kvmgt_page_track_write; - vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; - kvm_page_track_register_notifier(kvm, &vgpu->track_node); - - debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, - &vgpu->nr_cache_entries); - return 0; -} - -static bool kvmgt_guest_exit(struct intel_vgpu *info) -{ - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, info->debugfs)); - - kvm_page_track_unregister_notifier(info->kvm, &info->track_node); - kvm_put_kvm(info->kvm); - kvmgt_protect_table_destroy(info); - gvt_cache_destroy(info); - return true; -} - void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) { int i; From 978cf586ac35f34604e2d252a51b71192c39f1e4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:57 +0200 Subject: [PATCH 169/219] drm/i915/gvt: convert to use vfio_register_emulated_iommu_dev This is a straightforward conversion: the intel_vgpu already has a pointer to the vfio_dev, which can be replaced with the embedded structure, and we can replace all the mdev_get_drvdata() calls with a simple container_of(). Based on a patch from Jason Gunthorpe.
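For readers who have not seen the idiom before, here is a minimal sketch of the embed-and-container_of() pattern this conversion relies on. It is illustrative only and not part of the patch; the example_vgpu, to_example_vgpu and example_read names are hypothetical rather than taken from the driver:

#include <linux/container_of.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/vfio.h>

/* Private per-device state with the vfio_device embedded in it. */
struct example_vgpu {
	int id;
	struct vfio_device vfio_device;
};

/* Recover the containing structure from the embedded member. */
static inline struct example_vgpu *to_example_vgpu(struct vfio_device *vdev)
{
	return container_of(vdev, struct example_vgpu, vfio_device);
}

/* A vfio_device_ops callback can then reach its state without any drvdata lookup. */
static ssize_t example_read(struct vfio_device *vdev, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct example_vgpu *vgpu = to_example_vgpu(vdev);

	pr_debug("read on example vgpu %d\n", vgpu->id);
	return 0;
}

Embedding the vfio_device also ties its lifetime to the containing object, which is what makes a separately stored drvdata pointer unnecessary.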
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-29-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 190 ++++++++++++++++--------------- 2 files changed, 102 insertions(+), 90 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 8565189e0c0d..c32e8eb199f1 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -211,7 +211,7 @@ struct intel_vgpu { u32 scan_nonprivbb; - struct mdev_device *mdev; + struct vfio_device vfio_device; struct vfio_region *region; int num_regions; struct eventfd_ctx *intx_trigger; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 38629ab4c9bf..4113f850bf92 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -100,6 +100,9 @@ struct gvt_dma { struct kref ref; }; +#define vfio_dev_to_vgpu(vfio_dev) \ + container_of((vfio_dev), struct intel_vgpu, vfio_device) + static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node); @@ -723,44 +726,6 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) return ret; } -static int intel_vgpu_create(struct mdev_device *mdev) -{ - struct device *pdev = mdev_parent_dev(mdev); - struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; - struct intel_vgpu_type *type; - struct intel_vgpu *vgpu; - - type = &gvt->types[mdev_get_type_group_id(mdev)]; - if (!type) - return -EINVAL; - - vgpu = intel_gvt_create_vgpu(gvt, type); - if (IS_ERR(vgpu)) { - gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); - return PTR_ERR(vgpu); - } - - INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); - - vgpu->mdev = mdev; - mdev_set_drvdata(mdev, vgpu); - - gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", - dev_name(mdev_dev(mdev))); - return 0; -} - -static int intel_vgpu_remove(struct mdev_device *mdev) -{ - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - - if (vgpu->attached) - return -EBUSY; - - intel_gvt_destroy_vgpu(vgpu); - return 0; -} - static int intel_vgpu_iommu_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -829,9 +794,9 @@ out: return ret; } -static int intel_vgpu_open_device(struct mdev_device *mdev) +static int intel_vgpu_open_device(struct vfio_device *vfio_dev) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long events; int ret; struct vfio_group *vfio_group; @@ -840,7 +805,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, + ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events, &vgpu->iommu_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", @@ -849,7 +814,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) } events = VFIO_GROUP_NOTIFY_SET_KVM; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, + ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events, &vgpu->group_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", @@ -857,7 +822,8 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) goto 
undo_iommu; } - vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev)); + vfio_group = + vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev); if (IS_ERR_OR_NULL(vfio_group)) { ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group); gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); @@ -865,14 +831,6 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) } vgpu->vfio_group = vfio_group; - /* Take a module reference as mdev core doesn't take - * a reference for vendor driver. - */ - if (!try_module_get(THIS_MODULE)) { - ret = -ENODEV; - goto undo_group; - } - ret = -EEXIST; if (vgpu->attached) goto undo_group; @@ -910,11 +868,11 @@ undo_group: vgpu->vfio_group = NULL; undo_register: - vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &vgpu->group_notifier); undo_iommu: - vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &vgpu->iommu_notifier); out: return ret; @@ -944,19 +902,16 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) intel_gvt_release_vgpu(vgpu); - ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_IOMMU_NOTIFY, + ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY, &vgpu->iommu_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); - ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_GROUP_NOTIFY, + ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY, &vgpu->group_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for group failed: %d\n", ret); - /* dereference module reference taken at open */ - module_put(THIS_MODULE); - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs)); kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node); @@ -971,11 +926,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) vgpu->attached = false; } -static void intel_vgpu_close_device(struct mdev_device *mdev) +static void intel_vgpu_close_device(struct vfio_device *vfio_dev) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - - __intel_vgpu_release(vgpu); + __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev)); } static void intel_vgpu_release_work(struct work_struct *work) @@ -1127,10 +1080,10 @@ static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos) true : false; } -static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, +static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf, size_t count, loff_t *ppos) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; @@ -1201,11 +1154,11 @@ read_err: return -EFAULT; } -static ssize_t intel_vgpu_write(struct mdev_device *mdev, +static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev, const char __user *buf, size_t count, loff_t *ppos) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; @@ -1275,13 +1228,14 @@ write_err: return -EFAULT; } -static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) +static int intel_vgpu_mmap(struct vfio_device *vfio_dev, + struct vm_area_struct *vma) { + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int index; u64 virtaddr; unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); index 
= vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); if (index >= VFIO_PCI_ROM_REGION_INDEX) @@ -1404,10 +1358,10 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags, return func(vgpu, index, start, count, flags, data); } -static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, +static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd, unsigned long arg) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long minsz; gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); @@ -1682,14 +1636,9 @@ static ssize_t vgpu_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mdev_device *mdev = mdev_from_dev(dev); + struct intel_vgpu *vgpu = dev_get_drvdata(dev); - if (mdev) { - struct intel_vgpu *vgpu = (struct intel_vgpu *) - mdev_get_drvdata(mdev); - return sprintf(buf, "%d\n", vgpu->id); - } - return sprintf(buf, "\n"); + return sprintf(buf, "%d\n", vgpu->id); } static DEVICE_ATTR_RO(vgpu_id); @@ -1709,19 +1658,72 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; +static const struct vfio_device_ops intel_vgpu_dev_ops = { + .open_device = intel_vgpu_open_device, + .close_device = intel_vgpu_close_device, + .read = intel_vgpu_read, + .write = intel_vgpu_write, + .mmap = intel_vgpu_mmap, + .ioctl = intel_vgpu_ioctl, +}; + +static int intel_vgpu_probe(struct mdev_device *mdev) +{ + struct device *pdev = mdev_parent_dev(mdev); + struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; + struct intel_vgpu_type *type; + struct intel_vgpu *vgpu; + int ret; + + type = &gvt->types[mdev_get_type_group_id(mdev)]; + if (!type) + return -EINVAL; + + vgpu = intel_gvt_create_vgpu(gvt, type); + if (IS_ERR(vgpu)) { + gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); + return PTR_ERR(vgpu); + } + + INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); + vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev, + &intel_vgpu_dev_ops); + + dev_set_drvdata(&mdev->dev, vgpu); + ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device); + if (ret) { + intel_gvt_destroy_vgpu(vgpu); + return ret; + } + + gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", + dev_name(mdev_dev(mdev))); + return 0; +} + +static void intel_vgpu_remove(struct mdev_device *mdev) +{ + struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev); + + if (WARN_ON_ONCE(vgpu->attached)) + return; + intel_gvt_destroy_vgpu(vgpu); +} + +static struct mdev_driver intel_vgpu_mdev_driver = { + .driver = { + .name = "intel_vgpu_mdev", + .owner = THIS_MODULE, + .dev_groups = intel_vgpu_groups, + }, + .probe = intel_vgpu_probe, + .remove = intel_vgpu_remove, +}; + const struct mdev_parent_ops intel_vgpu_mdev_ops = { - .mdev_attr_groups = intel_vgpu_groups, + .owner = THIS_MODULE, .supported_type_groups = gvt_vgpu_type_groups, - .create = intel_vgpu_create, - .remove = intel_vgpu_remove, - - .open_device = intel_vgpu_open_device, - .close_device = intel_vgpu_close_device, - - .read = intel_vgpu_read, - .write = intel_vgpu_write, - .mmap = intel_vgpu_mmap, - .ioctl = intel_vgpu_ioctl, + .device_driver = &intel_vgpu_mdev_driver, }; int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) @@ -1925,11 +1927,21 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, static int __init kvmgt_init(void) { - return intel_gvt_set_ops(&intel_gvt_vgpu_ops); + int ret; + + ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops); + if (ret) + return ret; + + ret = 
mdev_register_driver(&intel_vgpu_mdev_driver); + if (ret) + intel_gvt_clear_ops(&intel_gvt_vgpu_ops); + return ret; } static void __exit kvmgt_exit(void) { + mdev_unregister_driver(&intel_vgpu_mdev_driver); intel_gvt_clear_ops(&intel_gvt_vgpu_ops); } From cba619cb0d4d66c743cf001c6b13c171a769a65f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:58 +0200 Subject: [PATCH 170/219] drm/i915/gvt: merge gvt.c into kvmgt.c The code in both files is deeply interconnected, so merge it and keep a bunch of structures and functions static. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-30-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/Makefile | 1 - drivers/gpu/drm/i915/gvt/gvt.c | 291 ------------------------------ drivers/gpu/drm/i915/gvt/gvt.h | 6 - drivers/gpu/drm/i915/gvt/kvmgt.c | 264 ++++++++++++++++++++++++++- 4 files changed, 260 insertions(+), 302 deletions(-) delete mode 100644 drivers/gpu/drm/i915/gvt/gvt.c diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index ca8894049dca..1699f644298e 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -12,7 +12,6 @@ kvmgt-$(CONFIG_DRM_I915_GVT) += \ gvt/fb_decoder.o \ gvt/firmware.o \ gvt/gtt.o \ - gvt/gvt.o \ gvt/handlers.o \ gvt/interrupt.o \ gvt/kvmgt.o \ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c deleted file mode 100644 index 047fb6c41788..000000000000 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- * - * Authors: - * Kevin Tian - * Eddie Dong - * - * Contributors: - * Niu Bing - * Zhi Wang - * - */ - -#include -#include - -#include "i915_drv.h" -#include "intel_gvt.h" -#include "gvt.h" -#include -#include - -static void init_device_info(struct intel_gvt *gvt) -{ - struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); - - info->max_support_vgpus = 8; - info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; - info->mmio_size = 2 * 1024 * 1024; - info->mmio_bar = 0; - info->gtt_start_offset = 8 * 1024 * 1024; - info->gtt_entry_size = 8; - info->gtt_entry_size_shift = 3; - info->gmadr_bytes_in_cmd = 8; - info->max_surface_size = 36 * 1024 * 1024; - info->msi_cap_offset = pdev->msi_cap; -} - -static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) -{ - struct intel_vgpu *vgpu; - int id; - - mutex_lock(&gvt->lock); - idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { - if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, - (void *)&gvt->service_request)) { - if (vgpu->active) - intel_vgpu_emulate_vblank(vgpu); - } - } - mutex_unlock(&gvt->lock); -} - -static int gvt_service_thread(void *data) -{ - struct intel_gvt *gvt = (struct intel_gvt *)data; - int ret; - - gvt_dbg_core("service thread start\n"); - - while (!kthread_should_stop()) { - ret = wait_event_interruptible(gvt->service_thread_wq, - kthread_should_stop() || gvt->service_request); - - if (kthread_should_stop()) - break; - - if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) - continue; - - intel_gvt_test_and_emulate_vblank(gvt); - - if (test_bit(INTEL_GVT_REQUEST_SCHED, - (void *)&gvt->service_request) || - test_bit(INTEL_GVT_REQUEST_EVENT_SCHED, - (void *)&gvt->service_request)) { - intel_gvt_schedule(gvt); - } - } - - return 0; -} - -static void clean_service_thread(struct intel_gvt *gvt) -{ - kthread_stop(gvt->service_thread); -} - -static int init_service_thread(struct intel_gvt *gvt) -{ - init_waitqueue_head(&gvt->service_thread_wq); - - gvt->service_thread = kthread_run(gvt_service_thread, - gvt, "gvt_service_thread"); - if (IS_ERR(gvt->service_thread)) { - gvt_err("fail to start service thread.\n"); - return PTR_ERR(gvt->service_thread); - } - return 0; -} - -/** - * intel_gvt_clean_device - clean a GVT device - * @i915: i915 private - * - * This function is called at the driver unloading stage, to free the - * resources owned by a GVT device. - * - */ -static void intel_gvt_clean_device(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); - - if (drm_WARN_ON(&i915->drm, !gvt)) - return; - - mdev_unregister_device(i915->drm.dev); - intel_gvt_cleanup_vgpu_type_groups(gvt); - intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); - intel_gvt_clean_vgpu_types(gvt); - - intel_gvt_debugfs_clean(gvt); - clean_service_thread(gvt); - intel_gvt_clean_cmd_parser(gvt); - intel_gvt_clean_sched_policy(gvt); - intel_gvt_clean_workload_scheduler(gvt); - intel_gvt_clean_gtt(gvt); - intel_gvt_free_firmware(gvt); - intel_gvt_clean_mmio_info(gvt); - idr_destroy(&gvt->vgpu_idr); - - kfree(i915->gvt); -} - -/** - * intel_gvt_init_device - initialize a GVT device - * @i915: drm i915 private data - * - * This function is called at the initialization stage, to initialize - * necessary GVT components. - * - * Returns: - * Zero on success, negative error code if failed. 
- * - */ -static int intel_gvt_init_device(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt; - struct intel_vgpu *vgpu; - int ret; - - if (drm_WARN_ON(&i915->drm, i915->gvt)) - return -EEXIST; - - gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); - if (!gvt) - return -ENOMEM; - - gvt_dbg_core("init gvt device\n"); - - idr_init_base(&gvt->vgpu_idr, 1); - spin_lock_init(&gvt->scheduler.mmio_context_lock); - mutex_init(&gvt->lock); - mutex_init(&gvt->sched_lock); - gvt->gt = to_gt(i915); - i915->gvt = gvt; - - init_device_info(gvt); - - ret = intel_gvt_setup_mmio_info(gvt); - if (ret) - goto out_clean_idr; - - intel_gvt_init_engine_mmio_context(gvt); - - ret = intel_gvt_load_firmware(gvt); - if (ret) - goto out_clean_mmio_info; - - ret = intel_gvt_init_irq(gvt); - if (ret) - goto out_free_firmware; - - ret = intel_gvt_init_gtt(gvt); - if (ret) - goto out_free_firmware; - - ret = intel_gvt_init_workload_scheduler(gvt); - if (ret) - goto out_clean_gtt; - - ret = intel_gvt_init_sched_policy(gvt); - if (ret) - goto out_clean_workload_scheduler; - - ret = intel_gvt_init_cmd_parser(gvt); - if (ret) - goto out_clean_sched_policy; - - ret = init_service_thread(gvt); - if (ret) - goto out_clean_cmd_parser; - - ret = intel_gvt_init_vgpu_types(gvt); - if (ret) - goto out_clean_thread; - - vgpu = intel_gvt_create_idle_vgpu(gvt); - if (IS_ERR(vgpu)) { - ret = PTR_ERR(vgpu); - gvt_err("failed to create idle vgpu\n"); - goto out_clean_types; - } - gvt->idle_vgpu = vgpu; - - intel_gvt_debugfs_init(gvt); - - ret = intel_gvt_init_vgpu_type_groups(gvt); - if (ret) - goto out_destroy_idle_vgpu; - - ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); - if (ret) - goto out_cleanup_vgpu_type_groups; - - gvt_dbg_core("gvt device initialization is done\n"); - return 0; - -out_cleanup_vgpu_type_groups: - intel_gvt_cleanup_vgpu_type_groups(gvt); -out_destroy_idle_vgpu: - intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); - intel_gvt_debugfs_clean(gvt); -out_clean_types: - intel_gvt_clean_vgpu_types(gvt); -out_clean_thread: - clean_service_thread(gvt); -out_clean_cmd_parser: - intel_gvt_clean_cmd_parser(gvt); -out_clean_sched_policy: - intel_gvt_clean_sched_policy(gvt); -out_clean_workload_scheduler: - intel_gvt_clean_workload_scheduler(gvt); -out_clean_gtt: - intel_gvt_clean_gtt(gvt); -out_free_firmware: - intel_gvt_free_firmware(gvt); -out_clean_mmio_info: - intel_gvt_clean_mmio_info(gvt); -out_clean_idr: - idr_destroy(&gvt->vgpu_idr); - kfree(gvt); - i915->gvt = NULL; - return ret; -} - -static void intel_gvt_pm_resume(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt = i915->gvt; - - intel_gvt_restore_fence(gvt); - intel_gvt_restore_mmio(gvt); - intel_gvt_restore_ggtt(gvt); -} - -const struct intel_vgpu_ops intel_gvt_vgpu_ops = { - .init_device = intel_gvt_init_device, - .clean_device = intel_gvt_clean_device, - .pm_resume = intel_gvt_pm_resume, -}; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index c32e8eb199f1..03ecffc2ba56 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -765,12 +765,6 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); -int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt); -void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt); - #include "trace.h" -extern const struct intel_vgpu_ops intel_gvt_vgpu_ops; -extern const struct mdev_parent_ops intel_vgpu_mdev_ops; - #endif diff --git 
a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 4113f850bf92..f033031c676d 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1,7 +1,7 @@ /* * KVMGT - the implementation of Intel mediated pass-through framework for KVM * - * Copyright(c) 2014-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,6 +26,11 @@ * Kevin Tian * Jike Song * Xiaoguang Chen + * Eddie Dong + * + * Contributors: + * Niu Bing + * Zhi Wang */ #include @@ -182,7 +187,7 @@ static struct attribute_group *gvt_vgpu_type_groups[] = { [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, }; -int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) +static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) { int i, j; struct intel_vgpu_type *type; @@ -211,7 +216,7 @@ unwind: return -ENOMEM; } -void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) +static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) { int i; struct attribute_group *group; @@ -1720,7 +1725,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = { .remove = intel_vgpu_remove, }; -const struct mdev_parent_ops intel_vgpu_mdev_ops = { +static const struct mdev_parent_ops intel_vgpu_mdev_ops = { .owner = THIS_MODULE, .supported_type_groups = gvt_vgpu_type_groups, .device_driver = &intel_vgpu_mdev_driver, @@ -1925,6 +1930,257 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } +static void init_device_info(struct intel_gvt *gvt) +{ + struct intel_gvt_device_info *info = &gvt->device_info; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); + + info->max_support_vgpus = 8; + info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; + info->mmio_size = 2 * 1024 * 1024; + info->mmio_bar = 0; + info->gtt_start_offset = 8 * 1024 * 1024; + info->gtt_entry_size = 8; + info->gtt_entry_size_shift = 3; + info->gmadr_bytes_in_cmd = 8; + info->max_surface_size = 36 * 1024 * 1024; + info->msi_cap_offset = pdev->msi_cap; +} + +static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int id; + + mutex_lock(&gvt->lock); + idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { + if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, + (void *)&gvt->service_request)) { + if (vgpu->active) + intel_vgpu_emulate_vblank(vgpu); + } + } + mutex_unlock(&gvt->lock); +} + +static int gvt_service_thread(void *data) +{ + struct intel_gvt *gvt = (struct intel_gvt *)data; + int ret; + + gvt_dbg_core("service thread start\n"); + + while (!kthread_should_stop()) { + ret = wait_event_interruptible(gvt->service_thread_wq, + kthread_should_stop() || gvt->service_request); + + if (kthread_should_stop()) + break; + + if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) + continue; + + intel_gvt_test_and_emulate_vblank(gvt); + + if (test_bit(INTEL_GVT_REQUEST_SCHED, + (void *)&gvt->service_request) || + test_bit(INTEL_GVT_REQUEST_EVENT_SCHED, + (void *)&gvt->service_request)) { + intel_gvt_schedule(gvt); + } + } + + return 0; +} + +static void clean_service_thread(struct intel_gvt *gvt) +{ + kthread_stop(gvt->service_thread); +} + +static int init_service_thread(struct intel_gvt *gvt) +{ + init_waitqueue_head(&gvt->service_thread_wq); + + gvt->service_thread = kthread_run(gvt_service_thread, + 
gvt, "gvt_service_thread"); + if (IS_ERR(gvt->service_thread)) { + gvt_err("fail to start service thread.\n"); + return PTR_ERR(gvt->service_thread); + } + return 0; +} + +/** + * intel_gvt_clean_device - clean a GVT device + * @i915: i915 private + * + * This function is called at the driver unloading stage, to free the + * resources owned by a GVT device. + * + */ +static void intel_gvt_clean_device(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); + + if (drm_WARN_ON(&i915->drm, !gvt)) + return; + + mdev_unregister_device(i915->drm.dev); + intel_gvt_cleanup_vgpu_type_groups(gvt); + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_clean_vgpu_types(gvt); + + intel_gvt_debugfs_clean(gvt); + clean_service_thread(gvt); + intel_gvt_clean_cmd_parser(gvt); + intel_gvt_clean_sched_policy(gvt); + intel_gvt_clean_workload_scheduler(gvt); + intel_gvt_clean_gtt(gvt); + intel_gvt_free_firmware(gvt); + intel_gvt_clean_mmio_info(gvt); + idr_destroy(&gvt->vgpu_idr); + + kfree(i915->gvt); +} + +/** + * intel_gvt_init_device - initialize a GVT device + * @i915: drm i915 private data + * + * This function is called at the initialization stage, to initialize + * necessary GVT components. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +static int intel_gvt_init_device(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt; + struct intel_vgpu *vgpu; + int ret; + + if (drm_WARN_ON(&i915->drm, i915->gvt)) + return -EEXIST; + + gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); + if (!gvt) + return -ENOMEM; + + gvt_dbg_core("init gvt device\n"); + + idr_init_base(&gvt->vgpu_idr, 1); + spin_lock_init(&gvt->scheduler.mmio_context_lock); + mutex_init(&gvt->lock); + mutex_init(&gvt->sched_lock); + gvt->gt = to_gt(i915); + i915->gvt = gvt; + + init_device_info(gvt); + + ret = intel_gvt_setup_mmio_info(gvt); + if (ret) + goto out_clean_idr; + + intel_gvt_init_engine_mmio_context(gvt); + + ret = intel_gvt_load_firmware(gvt); + if (ret) + goto out_clean_mmio_info; + + ret = intel_gvt_init_irq(gvt); + if (ret) + goto out_free_firmware; + + ret = intel_gvt_init_gtt(gvt); + if (ret) + goto out_free_firmware; + + ret = intel_gvt_init_workload_scheduler(gvt); + if (ret) + goto out_clean_gtt; + + ret = intel_gvt_init_sched_policy(gvt); + if (ret) + goto out_clean_workload_scheduler; + + ret = intel_gvt_init_cmd_parser(gvt); + if (ret) + goto out_clean_sched_policy; + + ret = init_service_thread(gvt); + if (ret) + goto out_clean_cmd_parser; + + ret = intel_gvt_init_vgpu_types(gvt); + if (ret) + goto out_clean_thread; + + vgpu = intel_gvt_create_idle_vgpu(gvt); + if (IS_ERR(vgpu)) { + ret = PTR_ERR(vgpu); + gvt_err("failed to create idle vgpu\n"); + goto out_clean_types; + } + gvt->idle_vgpu = vgpu; + + intel_gvt_debugfs_init(gvt); + + ret = intel_gvt_init_vgpu_type_groups(gvt); + if (ret) + goto out_destroy_idle_vgpu; + + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); + if (ret) + goto out_cleanup_vgpu_type_groups; + + gvt_dbg_core("gvt device initialization is done\n"); + return 0; + +out_cleanup_vgpu_type_groups: + intel_gvt_cleanup_vgpu_type_groups(gvt); +out_destroy_idle_vgpu: + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_debugfs_clean(gvt); +out_clean_types: + intel_gvt_clean_vgpu_types(gvt); +out_clean_thread: + clean_service_thread(gvt); +out_clean_cmd_parser: + intel_gvt_clean_cmd_parser(gvt); +out_clean_sched_policy: + intel_gvt_clean_sched_policy(gvt); +out_clean_workload_scheduler: + 
intel_gvt_clean_workload_scheduler(gvt); +out_clean_gtt: + intel_gvt_clean_gtt(gvt); +out_free_firmware: + intel_gvt_free_firmware(gvt); +out_clean_mmio_info: + intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); + kfree(gvt); + i915->gvt = NULL; + return ret; +} + +static void intel_gvt_pm_resume(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt = i915->gvt; + + intel_gvt_restore_fence(gvt); + intel_gvt_restore_mmio(gvt); + intel_gvt_restore_ggtt(gvt); +} + +static const struct intel_vgpu_ops intel_gvt_vgpu_ops = { + .init_device = intel_gvt_init_device, + .clean_device = intel_gvt_clean_device, + .pm_resume = intel_gvt_pm_resume, +}; + static int __init kvmgt_init(void) { int ret; From 6c7f98b334a32df5cac8abac8983ac4ce17cab57 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:13:59 +0200 Subject: [PATCH 171/219] vfio/mdev: Remove vfio_mdev.c Now that all mdev drivers directly create their own mdev_device driver and directly register with the vfio core's vfio_device_ops this is all dead code. Delete vfio_mdev.c and the mdev_parent_ops members that are connected to it. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-31-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- .../driver-api/vfio-mediated-device.rst | 3 - drivers/vfio/mdev/Makefile | 2 +- drivers/vfio/mdev/mdev_core.c | 40 +---- drivers/vfio/mdev/mdev_driver.c | 10 -- drivers/vfio/mdev/mdev_private.h | 2 - drivers/vfio/mdev/vfio_mdev.c | 152 ------------------ include/linux/mdev.h | 48 +----- 7 files changed, 6 insertions(+), 251 deletions(-) delete mode 100644 drivers/vfio/mdev/vfio_mdev.c diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst index 9f26079cacae..5a6e18a651a1 100644 --- a/Documentation/driver-api/vfio-mediated-device.rst +++ b/Documentation/driver-api/vfio-mediated-device.rst @@ -138,9 +138,6 @@ The structures in the mdev_parent_ops structure are as follows: * supported_config: attributes to define supported configurations * device_driver: device driver to bind for mediated device instances -The mdev_parent_ops also still has various functions pointers. Theses exist -for historical reasons only and shall not be used for new drivers. 
- When a driver wants to add the GUID creation sysfs to an existing device it has probe'd to then it should call:: diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile index ff9ecd802125..7c236ba1b90e 100644 --- a/drivers/vfio/mdev/Makefile +++ b/drivers/vfio/mdev/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o vfio_mdev.o +mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o obj-$(CONFIG_VFIO_MDEV) += mdev.o diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index b314101237fe..8b1e86b9e8bc 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -89,17 +89,10 @@ void mdev_release_parent(struct kref *kref) static void mdev_device_remove_common(struct mdev_device *mdev) { struct mdev_parent *parent = mdev->type->parent; - int ret; mdev_remove_sysfs_files(mdev); device_del(&mdev->dev); lockdep_assert_held(&parent->unreg_sem); - if (parent->ops->remove) { - ret = parent->ops->remove(mdev); - if (ret) - dev_err(&mdev->dev, "Remove failed: err=%d\n", ret); - } - /* Balances with device_initialize() */ put_device(&mdev->dev); } @@ -131,7 +124,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) /* check for mandatory ops */ if (!ops || !ops->supported_type_groups) return -EINVAL; - if (!ops->device_driver && (!ops->create || !ops->remove)) + if (!ops->device_driver) return -EINVAL; dev = get_device(dev); @@ -297,18 +290,10 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) goto out_put_device; } - if (parent->ops->create) { - ret = parent->ops->create(mdev); - if (ret) - goto out_unlock; - } - ret = device_add(&mdev->dev); if (ret) - goto out_remove; + goto out_unlock; - if (!drv) - drv = &vfio_mdev_driver; ret = device_driver_attach(&drv->driver, &mdev->dev); if (ret) goto out_del; @@ -325,9 +310,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) out_del: device_del(&mdev->dev); -out_remove: - if (parent->ops->remove) - parent->ops->remove(mdev); out_unlock: up_read(&parent->unreg_sem); out_put_device: @@ -370,28 +352,14 @@ int mdev_device_remove(struct mdev_device *mdev) static int __init mdev_init(void) { - int rc; - - rc = mdev_bus_register(); - if (rc) - return rc; - rc = mdev_register_driver(&vfio_mdev_driver); - if (rc) - goto err_bus; - return 0; -err_bus: - mdev_bus_unregister(); - return rc; + return bus_register(&mdev_bus_type); } static void __exit mdev_exit(void) { - mdev_unregister_driver(&vfio_mdev_driver); - if (mdev_bus_compat_class) class_compat_unregister(mdev_bus_compat_class); - - mdev_bus_unregister(); + bus_unregister(&mdev_bus_type); } subsys_initcall(mdev_init) diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c index 7927ed4f1711..9c2af59809e2 100644 --- a/drivers/vfio/mdev/mdev_driver.c +++ b/drivers/vfio/mdev/mdev_driver.c @@ -74,13 +74,3 @@ void mdev_unregister_driver(struct mdev_driver *drv) driver_unregister(&drv->driver); } EXPORT_SYMBOL(mdev_unregister_driver); - -int mdev_bus_register(void) -{ - return bus_register(&mdev_bus_type); -} - -void mdev_bus_unregister(void) -{ - bus_unregister(&mdev_bus_type); -} diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index afbad7b0a14a..6999c89db7b1 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -37,8 +37,6 @@ struct mdev_type { #define to_mdev_type(_kobj) \ container_of(_kobj, struct mdev_type, kobj) -extern struct mdev_driver 
vfio_mdev_driver; - int parent_create_sysfs_files(struct mdev_parent *parent); void parent_remove_sysfs_files(struct mdev_parent *parent); diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c deleted file mode 100644 index a90e24b0c851..000000000000 --- a/drivers/vfio/mdev/vfio_mdev.c +++ /dev/null @@ -1,152 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * VFIO based driver for Mediated device - * - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * Author: Neo Jia - * Kirti Wankhede - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "mdev_private.h" - -static int vfio_mdev_open_device(struct vfio_device *core_vdev) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->open_device)) - return 0; - - return parent->ops->open_device(mdev); -} - -static void vfio_mdev_close_device(struct vfio_device *core_vdev) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (likely(parent->ops->close_device)) - parent->ops->close_device(mdev); -} - -static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev, - unsigned int cmd, unsigned long arg) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->ioctl)) - return 0; - - return parent->ops->ioctl(mdev, cmd, arg); -} - -static ssize_t vfio_mdev_read(struct vfio_device *core_vdev, char __user *buf, - size_t count, loff_t *ppos) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->read)) - return -EINVAL; - - return parent->ops->read(mdev, buf, count, ppos); -} - -static ssize_t vfio_mdev_write(struct vfio_device *core_vdev, - const char __user *buf, size_t count, - loff_t *ppos) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->write)) - return -EINVAL; - - return parent->ops->write(mdev, buf, count, ppos); -} - -static int vfio_mdev_mmap(struct vfio_device *core_vdev, - struct vm_area_struct *vma) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->mmap)) - return -EINVAL; - - return parent->ops->mmap(mdev, vma); -} - -static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (parent->ops->request) - parent->ops->request(mdev, count); - else if (count == 0) - dev_notice(mdev_dev(mdev), - "No mdev vendor driver request callback support, blocked until released by user\n"); -} - -static const struct vfio_device_ops vfio_mdev_dev_ops = { - .name = "vfio-mdev", - .open_device = vfio_mdev_open_device, - .close_device = vfio_mdev_close_device, - .ioctl = vfio_mdev_unlocked_ioctl, - .read = vfio_mdev_read, - .write = vfio_mdev_write, - .mmap = vfio_mdev_mmap, - .request = vfio_mdev_request, -}; - -static int vfio_mdev_probe(struct mdev_device *mdev) -{ - struct vfio_device *vdev; - int ret; - - vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); - if (!vdev) - return -ENOMEM; - - vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops); - ret = vfio_register_emulated_iommu_dev(vdev); - if (ret) - goto 
out_uninit; - - dev_set_drvdata(&mdev->dev, vdev); - return 0; - -out_uninit: - vfio_uninit_group_dev(vdev); - kfree(vdev); - return ret; -} - -static void vfio_mdev_remove(struct mdev_device *mdev) -{ - struct vfio_device *vdev = dev_get_drvdata(&mdev->dev); - - vfio_unregister_group_dev(vdev); - vfio_uninit_group_dev(vdev); - kfree(vdev); -} - -struct mdev_driver vfio_mdev_driver = { - .driver = { - .name = "vfio_mdev", - .owner = THIS_MODULE, - .mod_name = KBUILD_MODNAME, - }, - .probe = vfio_mdev_probe, - .remove = vfio_mdev_remove, -}; diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 15d03f6532d0..192aed656116 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -40,40 +40,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory * to provide supported types. - * @create: Called to allocate basic resources in parent device's - * driver for a particular mediated device. It is - * mandatory to provide create ops. - * @mdev: mdev_device structure on of mediated device - * that is being created - * Returns integer: success (0) or error (< 0) - * @remove: Called to free resources in parent device's driver for - * a mediated device. It is mandatory to provide 'remove' - * ops. - * @mdev: mdev_device device structure which is being - * destroyed - * Returns integer: success (0) or error (< 0) - * @read: Read emulation callback - * @mdev: mediated device structure - * @buf: read buffer - * @count: number of bytes to read - * @ppos: address. - * Retuns number on bytes read on success or error. - * @write: Write emulation callback - * @mdev: mediated device structure - * @buf: write buffer - * @count: number of bytes to be written - * @ppos: address. - * Retuns number on bytes written on success or error. - * @ioctl: IOCTL callback - * @mdev: mediated device structure - * @cmd: ioctl command - * @arg: arguments to ioctl - * @mmap: mmap callback - * @mdev: mediated device structure - * @vma: vma structure - * @request: request callback to release device - * @mdev: mediated device structure - * @count: request sequence number + * * Parent device that support mediated device should be registered with mdev * module with mdev_parent_ops structure. **/ @@ -83,19 +50,6 @@ struct mdev_parent_ops { const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; - - int (*create)(struct mdev_device *mdev); - int (*remove)(struct mdev_device *mdev); - int (*open_device)(struct mdev_device *mdev); - void (*close_device)(struct mdev_device *mdev); - ssize_t (*read)(struct mdev_device *mdev, char __user *buf, - size_t count, loff_t *ppos); - ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, - size_t count, loff_t *ppos); - long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, - unsigned long arg); - int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); - void (*request)(struct mdev_device *mdev, unsigned int count); }; /* interface for exporting mdev supported type attributes */ From e6486939d8ea22569af942a1888aa3824f59cc5e Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:00 +0200 Subject: [PATCH 172/219] vfio/mdev: Remove mdev_parent_ops dev_attr_groups This is only used by one sample to print a fixed string that is pointless. 
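For illustration, a rough sketch of the kind of parent-device attribute group involved follows; all names are hypothetical and this is not the sample's actual code:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_info_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	/* The parent has nothing mdev-specific to report; a fixed string at best. */
	return sysfs_emit(buf, "example parent attribute\n");
}
static DEVICE_ATTR_RO(example_info);

static struct attribute *example_parent_attrs[] = {
	&dev_attr_example_info.attr,
	NULL,
};

/* Registered by the mdev core against the parent device's kobject. */
static const struct attribute_group example_parent_group = {
	.name = "example_parent",
	.attrs = example_parent_attrs,
};

Because such a group hangs off the parent device's kobject, its lifetime follows the parent rather than the mediated device or the driver that supplied it.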
In general, having a device driver attach sysfs attributes to the parent is horrific. This should never happen, and always leads to some kind of lifetime bug, as it becomes very difficult for the sysfs attribute to go back to any data owned by the device driver. Remove the general mechanism to create this abuse. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-32-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- drivers/vfio/mdev/mdev_sysfs.c | 12 ++---------- include/linux/mdev.h | 2 -- samples/vfio-mdev/mtty.c | 30 +----------------------------- 3 files changed, 3 insertions(+), 41 deletions(-) diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index f5cf1931c54e..66eef08833a4 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -197,7 +197,6 @@ void parent_remove_sysfs_files(struct mdev_parent *parent) remove_mdev_supported_type(type); } - sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups); kset_unregister(parent->mdev_types_kset); } @@ -213,17 +212,10 @@ int parent_create_sysfs_files(struct mdev_parent *parent) INIT_LIST_HEAD(&parent->type_list); - ret = sysfs_create_groups(&parent->dev->kobj, - parent->ops->dev_attr_groups); - if (ret) - goto create_err; - ret = add_mdev_supported_type_groups(parent); if (ret) - sysfs_remove_groups(&parent->dev->kobj, - parent->ops->dev_attr_groups); - else - return ret; + goto create_err; + return 0; create_err: kset_unregister(parent->mdev_types_kset); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 192aed656116..69df1fa2cb6f 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -36,7 +36,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * * @owner: The module owner. * @device_driver: Which device driver to probe() on newly created devices - * @dev_attr_groups: Attributes of the parent device. * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory * to provide supported types.
@@ -47,7 +46,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); struct mdev_parent_ops { struct module *owner; struct mdev_driver *device_driver; - const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; }; diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index a0e1a469bd47..4f227dc26785 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -1207,38 +1207,11 @@ static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd, return -ENOTTY; } -static ssize_t -sample_mtty_dev_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "This is phy device\n"); -} - -static DEVICE_ATTR_RO(sample_mtty_dev); - -static struct attribute *mtty_dev_attrs[] = { - &dev_attr_sample_mtty_dev.attr, - NULL, -}; - -static const struct attribute_group mtty_dev_group = { - .name = "mtty_dev", - .attrs = mtty_dev_attrs, -}; - -static const struct attribute_group *mtty_dev_groups[] = { - &mtty_dev_group, - NULL, -}; - static ssize_t sample_mdev_dev_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (mdev_from_dev(dev)) - return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); - - return sprintf(buf, "\n"); + return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); } static DEVICE_ATTR_RO(sample_mdev_dev); @@ -1333,7 +1306,6 @@ static struct mdev_driver mtty_driver = { static const struct mdev_parent_ops mdev_fops = { .owner = THIS_MODULE, .device_driver = &mtty_driver, - .dev_attr_groups = mtty_dev_groups, .supported_type_groups = mdev_type_groups, }; From 6b42f491e17ce13f5ff7f2d1f49c73a0f4c47b20 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:01 +0200 Subject: [PATCH 173/219] vfio/mdev: Remove mdev_parent_ops The last useful member in this struct is the supported_type_groups, move it to the mdev_driver and delete mdev_parent_ops. 
Replace it with mdev_driver as an argument to mdev_register_device() Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-33-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- .../driver-api/vfio-mediated-device.rst | 24 ++++-------------- drivers/gpu/drm/i915/gvt/kvmgt.c | 7 +----- drivers/s390/cio/vfio_ccw_ops.c | 7 +----- drivers/s390/crypto/vfio_ap_ops.c | 9 ++----- drivers/vfio/mdev/mdev_core.c | 13 ++++------ drivers/vfio/mdev/mdev_private.h | 2 +- drivers/vfio/mdev/mdev_sysfs.c | 6 ++--- include/linux/mdev.h | 25 +++---------------- samples/vfio-mdev/mbochs.c | 9 ++----- samples/vfio-mdev/mdpy.c | 9 ++----- samples/vfio-mdev/mtty.c | 9 ++----- 11 files changed, 28 insertions(+), 92 deletions(-) diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst index 5a6e18a651a1..784bbeb22adc 100644 --- a/Documentation/driver-api/vfio-mediated-device.rst +++ b/Documentation/driver-api/vfio-mediated-device.rst @@ -105,6 +105,7 @@ structure to represent a mediated device's driver:: struct mdev_driver { int (*probe) (struct mdev_device *dev); void (*remove) (struct mdev_device *dev); + struct attribute_group **supported_type_groups; struct device_driver driver; }; @@ -119,30 +120,15 @@ to register and unregister itself with the core driver: extern void mdev_unregister_driver(struct mdev_driver *drv); -The mediated bus driver is responsible for adding mediated devices to the VFIO -group when devices are bound to the driver and removing mediated devices from -the VFIO when devices are unbound from the driver. - - -Physical Device Driver Interface --------------------------------- - -The physical device driver interface provides the mdev_parent_ops[3] structure -to define the APIs to manage work in the mediated core driver that is related -to the physical device. - -The structures in the mdev_parent_ops structure are as follows: - -* dev_attr_groups: attributes of the parent device -* mdev_attr_groups: attributes of the mediated device -* supported_config: attributes to define supported configurations -* device_driver: device driver to bind for mediated device instances +The mediated bus driver's probe function should create a vfio_device on top of +the mdev_device and connect it to an appropriate implementation of +vfio_device_ops. When a driver wants to add the GUID creation sysfs to an existing device it has probe'd to then it should call:: extern int mdev_register_device(struct device *dev, - const struct mdev_parent_ops *ops); + struct mdev_driver *mdev_driver); This will provide the 'mdev_supported_types/XX/create' files which can then be used to trigger the creation of a mdev_device. 
The created mdev_device will be diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f033031c676d..0787ba5c301f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1723,12 +1723,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = { }, .probe = intel_vgpu_probe, .remove = intel_vgpu_remove, -}; - -static const struct mdev_parent_ops intel_vgpu_mdev_ops = { - .owner = THIS_MODULE, .supported_type_groups = gvt_vgpu_type_groups, - .device_driver = &intel_vgpu_mdev_driver, }; int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) @@ -2131,7 +2126,7 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) if (ret) goto out_destroy_idle_vgpu; - ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver); if (ret) goto out_cleanup_vgpu_type_groups; diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index d8589afac272..c4d60cdbf247 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -656,17 +656,12 @@ struct mdev_driver vfio_ccw_mdev_driver = { }, .probe = vfio_ccw_mdev_probe, .remove = vfio_ccw_mdev_remove, -}; - -static const struct mdev_parent_ops vfio_ccw_mdev_ops = { - .owner = THIS_MODULE, - .device_driver = &vfio_ccw_mdev_driver, .supported_type_groups = mdev_type_groups, }; int vfio_ccw_mdev_reg(struct subchannel *sch) { - return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops); + return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver); } void vfio_ccw_mdev_unreg(struct subchannel *sch) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 6e08d04b605d..ee0a3bf8f476 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1496,12 +1496,7 @@ static struct mdev_driver vfio_ap_matrix_driver = { }, .probe = vfio_ap_mdev_probe, .remove = vfio_ap_mdev_remove, -}; - -static const struct mdev_parent_ops vfio_ap_matrix_ops = { - .owner = THIS_MODULE, - .device_driver = &vfio_ap_matrix_driver, - .supported_type_groups = vfio_ap_mdev_type_groups, + .supported_type_groups = vfio_ap_mdev_type_groups, }; int vfio_ap_mdev_register(void) @@ -1514,7 +1509,7 @@ int vfio_ap_mdev_register(void) if (ret) return ret; - ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops); + ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver); if (ret) goto err_driver; return 0; diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 8b1e86b9e8bc..19a20ff420b7 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -109,12 +109,12 @@ static int mdev_device_remove_cb(struct device *dev, void *data) /* * mdev_register_device : Register a device * @dev: device structure representing parent device. - * @ops: Parent device operation structure to be registered. + * @mdev_driver: Device driver to bind to the newly created mdev * * Add device to list of registered parent devices. * Returns a negative value on error, otherwise 0. 
*/ -int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) +int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver) { int ret; struct mdev_parent *parent; @@ -122,9 +122,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) char *envp[] = { env_string, NULL }; /* check for mandatory ops */ - if (!ops || !ops->supported_type_groups) - return -EINVAL; - if (!ops->device_driver) + if (!mdev_driver->supported_type_groups) return -EINVAL; dev = get_device(dev); @@ -151,7 +149,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) init_rwsem(&parent->unreg_sem); parent->dev = dev; - parent->ops = ops; + parent->mdev_driver = mdev_driver; if (!mdev_bus_compat_class) { mdev_bus_compat_class = class_compat_register("mdev_bus"); @@ -249,7 +247,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) int ret; struct mdev_device *mdev, *tmp; struct mdev_parent *parent = type->parent; - struct mdev_driver *drv = parent->ops->device_driver; + struct mdev_driver *drv = parent->mdev_driver; mutex_lock(&mdev_list_lock); @@ -271,7 +269,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) mdev->dev.parent = parent->dev; mdev->dev.bus = &mdev_bus_type; mdev->dev.release = mdev_device_release; - mdev->dev.groups = parent->ops->mdev_attr_groups; mdev->type = type; /* Pairs with the put in mdev_device_release() */ kobject_get(&type->kobj); diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 6999c89db7b1..5a7ffd4e770f 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -15,7 +15,7 @@ void mdev_bus_unregister(void); struct mdev_parent { struct device *dev; - const struct mdev_parent_ops *ops; + struct mdev_driver *mdev_driver; struct kref ref; struct list_head next; struct kset *mdev_types_kset; diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 66eef08833a4..5a3873d1a275 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -97,7 +97,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, { struct mdev_type *type; struct attribute_group *group = - parent->ops->supported_type_groups[type_group_id]; + parent->mdev_driver->supported_type_groups[type_group_id]; int ret; if (!group->name) { @@ -154,7 +154,7 @@ attr_create_failed: static void remove_mdev_supported_type(struct mdev_type *type) { struct attribute_group *group = - type->parent->ops->supported_type_groups[type->type_group_id]; + type->parent->mdev_driver->supported_type_groups[type->type_group_id]; sysfs_remove_files(&type->kobj, (const struct attribute **)group->attrs); @@ -168,7 +168,7 @@ static int add_mdev_supported_type_groups(struct mdev_parent *parent) { int i; - for (i = 0; parent->ops->supported_type_groups[i]; i++) { + for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) { struct mdev_type *type; type = add_mdev_supported_type(parent, i); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 69df1fa2cb6f..1f6f57a3c316 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -30,26 +30,6 @@ unsigned int mdev_get_type_group_id(struct mdev_device *mdev); unsigned int mtype_get_type_group_id(struct mdev_type *mtype); struct device *mtype_get_parent_dev(struct mdev_type *mtype); -/** - * struct mdev_parent_ops - Structure to be registered for each parent device to - * register the device to mdev module. 
- * - * @owner: The module owner. - * @device_driver: Which device driver to probe() on newly created devices - * @mdev_attr_groups: Attributes of the mediated device. - * @supported_type_groups: Attributes to define supported types. It is mandatory - * to provide supported types. - * - * Parent device that support mediated device should be registered with mdev - * module with mdev_parent_ops structure. - **/ -struct mdev_parent_ops { - struct module *owner; - struct mdev_driver *device_driver; - const struct attribute_group **mdev_attr_groups; - struct attribute_group **supported_type_groups; -}; - /* interface for exporting mdev supported type attributes */ struct mdev_type_attribute { struct attribute attr; @@ -74,12 +54,15 @@ struct mdev_type_attribute mdev_type_attr_##_name = \ * struct mdev_driver - Mediated device driver * @probe: called when new device created * @remove: called when device removed + * @supported_type_groups: Attributes to define supported types. It is mandatory + * to provide supported types. * @driver: device driver structure * **/ struct mdev_driver { int (*probe)(struct mdev_device *dev); void (*remove)(struct mdev_device *dev); + struct attribute_group **supported_type_groups; struct device_driver driver; }; @@ -98,7 +81,7 @@ static inline const guid_t *mdev_uuid(struct mdev_device *mdev) extern struct bus_type mdev_bus_type; -int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops); +int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver); void mdev_unregister_device(struct device *dev); int mdev_register_driver(struct mdev_driver *drv); diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index e90c8552cc31..344c2901a82b 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -1412,12 +1412,7 @@ static struct mdev_driver mbochs_driver = { }, .probe = mbochs_probe, .remove = mbochs_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = &mbochs_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static const struct file_operations vd_fops = { @@ -1462,7 +1457,7 @@ static int __init mbochs_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mbochs_dev, &mdev_fops); + ret = mdev_register_device(&mbochs_dev, &mbochs_driver); if (ret) goto err_device; diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index fe5d43e797b6..e8c46eb2e246 100644 --- a/samples/vfio-mdev/mdpy.c +++ b/samples/vfio-mdev/mdpy.c @@ -723,12 +723,7 @@ static struct mdev_driver mdpy_driver = { }, .probe = mdpy_probe, .remove = mdpy_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = &mdpy_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static const struct file_operations vd_fops = { @@ -771,7 +766,7 @@ static int __init mdpy_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mdpy_dev, &mdev_fops); + ret = mdev_register_device(&mdpy_dev, &mdpy_driver); if (ret) goto err_device; diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index 4f227dc26785..f42a59ed2e3f 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -1301,12 +1301,7 @@ static struct mdev_driver mtty_driver = { }, .probe = mtty_probe, .remove = mtty_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = 
&mtty_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static void mtty_device_release(struct device *dev) @@ -1357,7 +1352,7 @@ static int __init mtty_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mtty_dev.dev, &mdev_fops); + ret = mdev_register_device(&mtty_dev.dev, &mtty_driver); if (ret) goto err_device; return 0; From 2aa72ec97ce9eb092cb69efd26f5eb2469e61734 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:02 +0200 Subject: [PATCH 174/219] vfio/mdev: Use the driver core to create the 'remove' file The device creator is supposed to use the dev.groups value to add sysfs files before device_add is called, not call sysfs_create_files() after device_add() returns. This creates a race with uevent delivery where the extra attribute will not be visible. This was being done because the groups had been co-opted by the mdev driver, now that prior patches have moved the driver's groups to the struct device_driver the dev.group is properly free for use here. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-34-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- drivers/vfio/mdev/mdev_core.c | 1 + drivers/vfio/mdev/mdev_private.h | 2 ++ drivers/vfio/mdev/mdev_sysfs.c | 19 ++++++++++--------- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 19a20ff420b7..b8b9e7911e55 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -269,6 +269,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) mdev->dev.parent = parent->dev; mdev->dev.bus = &mdev_bus_type; mdev->dev.release = mdev_device_release; + mdev->dev.groups = mdev_device_groups; mdev->type = type; /* Pairs with the put in mdev_device_release() */ kobject_get(&type->kobj); diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 5a7ffd4e770f..7c9fc79f3d83 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -32,6 +32,8 @@ struct mdev_type { unsigned int type_group_id; }; +extern const struct attribute_group *mdev_device_groups[]; + #define to_mdev_type_attr(_attr) \ container_of(_attr, struct mdev_type_attribute, attr) #define to_mdev_type(_kobj) \ diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 5a3873d1a275..0ccfeb3dda24 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -244,11 +244,20 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_WO(remove); -static const struct attribute *mdev_device_attrs[] = { +static struct attribute *mdev_device_attrs[] = { &dev_attr_remove.attr, NULL, }; +static const struct attribute_group mdev_device_group = { + .attrs = mdev_device_attrs, +}; + +const struct attribute_group *mdev_device_groups[] = { + &mdev_device_group, + NULL +}; + int mdev_create_sysfs_files(struct mdev_device *mdev) { struct mdev_type *type = mdev->type; @@ -262,15 +271,8 @@ int mdev_create_sysfs_files(struct mdev_device *mdev) ret = sysfs_create_link(kobj, &type->kobj, "mdev_type"); if (ret) goto type_link_failed; - - ret = sysfs_create_files(kobj, mdev_device_attrs); - if (ret) - goto create_files_failed; - return ret; -create_files_failed: - sysfs_remove_link(kobj, "mdev_type"); type_link_failed: 
sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev)); return ret; @@ -280,7 +282,6 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev) { struct kobject *kobj = &mdev->dev.kobj; - sysfs_remove_files(kobj, mdev_device_attrs); sysfs_remove_link(kobj, "mdev_type"); sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev)); } From 2917f53113be3b7a0f374e02cebe6d6b749366b5 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:03 +0200 Subject: [PATCH 175/219] vfio/mdev: Remove mdev drvdata This is no longer used, remove it. All usages were moved over to either use container_of() from a vfio_device or to use dev_drvdata() directly on the mdev. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-35-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- include/linux/mdev.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 1f6f57a3c316..bb539794f54a 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -15,7 +15,6 @@ struct mdev_type; struct mdev_device { struct device dev; guid_t uuid; - void *driver_data; struct list_head next; struct mdev_type *type; bool active; @@ -66,14 +65,6 @@ struct mdev_driver { struct device_driver driver; }; -static inline void *mdev_get_drvdata(struct mdev_device *mdev) -{ - return mdev->driver_data; -} -static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) -{ - mdev->driver_data = data; -} static inline const guid_t *mdev_uuid(struct mdev_device *mdev) { return &mdev->uuid; From b4b157577cb1de13bee8bebc3576f1de6799a921 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jouni=20H=C3=B6gander?= Date: Wed, 13 Apr 2022 11:28:26 +0300 Subject: [PATCH 176/219] drm/i915: Check EDID for HDR static metadata when choosing blc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have now seen panel (XMG Core 15 e21 laptop) advertizing support for Intel proprietary eDP backlight control via DPCD registers, but actually working only with legacy pwm control. This patch adds panel EDID check for possible HDR static metadata and Intel proprietary eDP backlight control is used only if that exists. Missing HDR static metadata is ignored if user specifically asks for Intel proprietary eDP backlight control via enable_dpcd_backlight parameter. 
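In rough terms, the gating added here amounts to the sketch below (condensed from the diff that follows; the helper name is made up for illustration, the real logic lives in intel_dp_aux_supports_hdr_backlight()):

	/*
	 * Minimal sketch of the new check, condensed from the diff below;
	 * not the literal driver code.
	 */
	static bool panel_ok_for_intel_hdr_backlight(struct intel_connector *connector,
						     int enable_dpcd_backlight)
	{
		bool has_hdr_metadata =
			connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
			BIT(HDMI_STATIC_METADATA_TYPE1);

		/* i915.enable_dpcd_backlight=3 forces the Intel interface */
		if (enable_dpcd_backlight == INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL)
			return true;

		/* otherwise require HDR static metadata in the panel EDID */
		return has_hdr_metadata;
	}
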
v2 : - Ignore missing HDR static metadata if Intel proprietary eDP backlight control is forced via i915.enable_dpcd_backlight - Printout info message if panel is missing HDR static metadata and support for Intel proprietary eDP backlight control is detected Fixes: 4a8d79901d5b ("drm/i915/dp: Enable Intel's HDR backlight interface (only SDR for now)") Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5284 Cc: Lyude Paul Cc: Mika Kahola Cc: Jani Nikula Cc: Filippo Falezza Cc: stable@vger.kernel.org Signed-off-by: Jouni Högander Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220413082826.120634-1-jouni.hogander@intel.com Reviewed-by: Lyude Paul --- .../drm/i915/display/intel_dp_aux_backlight.c | 34 ++++++++++++++----- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 97cf3cac0105..fb6cf30ee628 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -97,6 +97,14 @@ #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359 +enum intel_dp_aux_backlight_modparam { + INTEL_DP_AUX_BACKLIGHT_AUTO = -1, + INTEL_DP_AUX_BACKLIGHT_OFF = 0, + INTEL_DP_AUX_BACKLIGHT_ON = 1, + INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2, + INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, +}; + /* Intel EDP backlight callbacks */ static bool intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) @@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) return false; } + /* + * If we don't have HDR static metadata there is no way to + * runtime detect used range for nits based control. For now + * do not use Intel proprietary eDP backlight control if we + * don't have this data in panel EDID. In case we find panel + * which supports only nits based control, but doesn't provide + * HDR static metadata we need to start maintaining table of + * ranges for such panels. + */ + if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && + !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & + BIT(HDMI_STATIC_METADATA_TYPE1))) { + drm_info(&i915->drm, + "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", + INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); + return false; + } + panel->backlight.edp.intel.sdr_uses_aux = tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP; @@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = { .get = intel_dp_aux_vesa_get_backlight, }; -enum intel_dp_aux_backlight_modparam { - INTEL_DP_AUX_BACKLIGHT_AUTO = -1, - INTEL_DP_AUX_BACKLIGHT_OFF = 0, - INTEL_DP_AUX_BACKLIGHT_ON = 1, - INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2, - INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, -}; - int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; From af2cbc6ef967f61711a3c40fca5366ea0bc7fecc Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 21 Apr 2022 19:22:21 +0300 Subject: [PATCH 177/219] drm/i915: Fix SEL_FETCH_PLANE_*(PIPE_B+) register addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix typo in the _SEL_FETCH_PLANE_BASE_1_B register base address. 
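For background, i915 usually extrapolates per-pipe register addresses from the pipe A and pipe B base offsets, roughly as in the sketch below (a hypothetical helper illustrating the pattern, not the actual i915_reg.h macros), which is why a single wrong pipe B base skews every derived SEL_FETCH_PLANE_*(PIPE_B+) address:

	/*
	 * Illustration only: pipe B and later pipes are derived from the
	 * A->B stride, so a wrong _SEL_FETCH_PLANE_BASE_1_B value shifts
	 * every register computed from it.
	 */
	static u32 per_pipe_reg(u32 base_a, u32 base_b, int pipe)
	{
		/* pipe 0 = A, pipe 1 = B, later pipes follow the A->B stride */
		return base_a + pipe * (base_b - base_a);
	}
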
Fixes: a5523e2ff074a5 ("drm/i915: Add PSR2 selective fetch registers") References: https://gitlab.freedesktop.org/drm/intel/-/issues/5400 Cc: José Roberto de Souza Cc: # v5.9+ Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220421162221.2261895-1-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index aa0062d07269..c997bd604c49 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5161,7 +5161,7 @@ #define _SEL_FETCH_PLANE_BASE_6_A 0x70940 #define _SEL_FETCH_PLANE_BASE_7_A 0x70960 #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880 -#define _SEL_FETCH_PLANE_BASE_1_B 0x70990 +#define _SEL_FETCH_PLANE_BASE_1_B 0x71890 #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \ _SEL_FETCH_PLANE_BASE_1_A, \ From 3e1faae3398789abe8d4797255bfe28d95d81308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 13 Apr 2022 18:28:51 +0300 Subject: [PATCH 178/219] drm/i915/fbc: Consult hw.crtc instead of uapi.crtc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit plane_state->uapi.crtc is not what we want to be looking at. If bigjoiner is used hw.crtc is what tells us what crtc the plane is supposedly using. Not an actual problem on current hardware as the only FBC capable pipe (A) can't be a bigjoiner slave and thus uapi.crtc==hw.crtc always here. But when we get more FBC instances this will become actually important. Fixes: 2e6c99f88679 ("drm/i915/fbc: Nuke lots of crap from intel_fbc_state_cache") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220413152852.7336-1-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/display/intel_fbc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index b7bdb0739744..3cbc65ab7173 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1053,7 +1053,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); const struct drm_framebuffer *fb = plane_state->hw.fb; - struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); const struct intel_crtc_state *crtc_state; struct intel_fbc *fbc = plane->fbc; From 14eb76f73e4390e1da448e86b9a8ad22c3f25119 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 13 Apr 2022 18:28:52 +0300 Subject: [PATCH 179/219] drm/i915/fbc: s/false/0/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit intel_fbc_check_plane() is supposed to an int, not a boolean. So replace the bogus 'return false's with the correct 'return 0's. These were accidental copy-paste mistakes when the code got moved into intel_fbc_check_plane() from somewhere else tht did return a boolean. No functional issue here since false==0. 
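A minimal illustration of why this is purely cosmetic (hypothetical helper names, assuming the convention visible in the diff below: 0 means "checked, FBC simply not used", a negative errno means a real failure):

	/*
	 * false silently converts to 0, so the old code happened to work,
	 * but an int-returning function should say 'return 0' explicitly.
	 */
	static int check_plane_sketch(struct intel_plane_state *plane_state)
	{
		if (!plane_ok_for_fbc(plane_state)) {	/* hypothetical check */
			plane_state->no_fbc_reason = "some constraint not met";
			return 0;	/* not an error, just no FBC */
		}
		return 0;
	}
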
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220413152852.7336-2-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/display/intel_fbc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 3cbc65ab7173..bbdc34a23d54 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1094,7 +1094,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, */ if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) { plane_state->no_fbc_reason = "PSR2 enabled"; - return false; + return 0; } if (!pixel_format_is_valid(plane_state)) { @@ -1120,7 +1120,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && fb->format->has_alpha) { plane_state->no_fbc_reason = "per-pixel alpha not supported"; - return false; + return 0; } if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { @@ -1136,7 +1136,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, if (DISPLAY_VER(i915) >= 9 && plane_state->view.color_plane[0].y & 3) { plane_state->no_fbc_reason = "plane start Y offset misaligned"; - return false; + return 0; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ @@ -1144,7 +1144,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, (plane_state->view.color_plane[0].y + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane end Y offset misaligned"; - return false; + return 0; } /* WaFbcExceedCdClockThreshold:hsw,bdw */ From 9274229af2cfe4597f805071e42be8763331898f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:31:53 +0200 Subject: [PATCH 180/219] drm/i915: Make .get_dplls() return int MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Get rid of the confusing back and forth between bools and ints in the .get_dplls() stuff. Just make everything return an int. Initial conversion done with cocci, with some manual fixups on top: @find@ identifier func !~ "get_hw_state|_is_|needed"; typedef bool; parameter list[N] P; @@ - bool + int func(P) { <... ( - return true; + return 0; | - return false; + return -EINVAL; ) ...> } @@ identifier find.func; expression list[find.N] E; expression X; @@ - if (!func(E)) + ret = func(E); + if (ret) { ... - return X; + return ret; } @@ identifier find.func; expression X; expression list[find.N] E; @@ - if (!func(E)) + ret = func(E); + if (ret) - return X; + return ret; @@ identifier find.func; expression list[find.N] E; expression O, X; typedef bool; bool B; @@ - B = func(E); - if (O && !B) + if (O) { + ret = func(E); + if (ret) - return X; + return ret; + } @@ identifier find.func; expression list[find.N] E; expression O, X; @@ - if (O && !func(E)) + if (O) { + ret = func(E); + if (ret) - return X; + return ret; + } @@ identifier find.func; expression list[find.N] E; expression X; typedef bool; bool B; @@ - B = func(E); - if (!B) + ret = func(E); + if (ret) { ... 
- return X; + return ret; } Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_dpll.c | 12 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 242 +++++++++--------- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 6 +- 3 files changed, 131 insertions(+), 129 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 95b9d327ed4d..1bd4a05dff7c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -935,6 +935,7 @@ static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state) to_intel_atomic_state(crtc_state->uapi.state); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); + int ret; if (IS_DG2(dev_priv)) return intel_mpllb_calc_state(crtc_state, encoder); @@ -943,11 +944,12 @@ static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state) intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; - if (!intel_reserve_shared_dplls(state, crtc, encoder)) { + ret = intel_reserve_shared_dplls(state, crtc, encoder); + if (ret) { drm_dbg_kms(&dev_priv->drm, "failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); - return -EINVAL; + return ret; } return 0; @@ -1076,6 +1078,7 @@ static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state) to_intel_atomic_state(crtc_state->uapi.state); const struct intel_limit *limit; int refclk = 120000; + int ret; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -1118,11 +1121,12 @@ static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state) ilk_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); - if (!intel_reserve_shared_dplls(state, crtc, NULL)) { + ret = intel_reserve_shared_dplls(state, crtc, NULL); + if (ret) { drm_dbg_kms(&dev_priv->drm, "failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); - return -EINVAL; + return ret; } return 0; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index b7071da4b7e5..1d52796333ab 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -90,9 +90,9 @@ struct intel_shared_dpll_funcs { struct intel_dpll_mgr { const struct dpll_info *dpll_info; - bool (*get_dplls)(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder); + int (*get_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); void (*put_dplls)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*update_active_dpll)(struct intel_atomic_state *state, @@ -514,9 +514,9 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, udelay(200); } -static bool ibx_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int ibx_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -541,7 +541,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, } if (!pll) - return false; + return -EINVAL; /* reference the pll */ intel_reference_shared_dpll(state, crtc, @@ -549,7 +549,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, crtc_state->shared_dpll = pll; - return true; + return 0; } 
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, @@ -584,7 +584,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = { }; static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; @@ -1060,13 +1060,13 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915, return link_clock * 2; } -static bool hsw_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int hsw_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct intel_shared_dpll *pll; + struct intel_shared_dpll *pll = NULL; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -1077,18 +1077,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, pll = hsw_ddi_lcpll_get_dpll(crtc_state); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) pll = hsw_ddi_spll_get_dpll(state, crtc); - else - return false; if (!pll) - return false; + return -EINVAL; intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; - return true; + return 0; } static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -1493,7 +1491,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params, params->dco_integer * MHz(1)) * 0x8000, MHz(1)); } -static bool +static int skl_ddi_calculate_wrpll(int clock /* in Hz */, int ref_clock, struct skl_wrpll_params *wrpll_params) @@ -1552,7 +1550,7 @@ skip_remaining_dividers: if (!ctx.p) { DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock); - return false; + return -EINVAL; } /* @@ -1564,14 +1562,15 @@ skip_remaining_dividers: skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock, ctx.central_freq, p0, p1, p2); - return true; + return 0; } -static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) +static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct skl_wrpll_params wrpll_params = {}; u32 ctrl1, cfgcr1, cfgcr2; + int ret; /* * See comment in intel_dpll_hw_state to understand why we always use 0 @@ -1581,10 +1580,10 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, - i915->dpll.ref_clks.nssc, - &wrpll_params)) - return false; + ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + i915->dpll.ref_clks.nssc, &wrpll_params); + if (ret) + return ret; cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | @@ -1602,7 +1601,8 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) crtc_state->dpll_hw_state.ctrl1 = ctrl1; crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; - return true; + + return 0; } static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, @@ -1676,7 +1676,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, return dco_freq / (p0 * p1 * p2 * 5); } -static bool +static int skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { u32 ctrl1; @@ -1713,7 +1713,7 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) crtc_state->dpll_hw_state.ctrl1 = ctrl1; - return 
true; + return 0; } static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, @@ -1750,33 +1750,23 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, return link_clock * 2; } -static bool skl_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int skl_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; - bool bret; + int ret; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - bret = skl_ddi_hdmi_pll_dividers(crtc_state); - if (!bret) { - drm_dbg_kms(&i915->drm, - "Could not get HDMI pll dividers.\n"); - return false; - } - } else if (intel_crtc_has_dp_encoder(crtc_state)) { - bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); - if (!bret) { - drm_dbg_kms(&i915->drm, - "Could not set DP dpll HW state.\n"); - return false; - } - } else { - return false; - } + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + ret = skl_ddi_hdmi_pll_dividers(crtc_state); + else if (intel_crtc_has_dp_encoder(crtc_state)) + ret = skl_ddi_dp_set_dpll_hw_state(crtc_state); + else + ret = -EINVAL; + if (ret) + return ret; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) pll = intel_find_shared_dpll(state, crtc, @@ -1789,14 +1779,14 @@ static bool skl_get_dpll(struct intel_atomic_state *state, BIT(DPLL_ID_SKL_DPLL2) | BIT(DPLL_ID_SKL_DPLL1)); if (!pll) - return false; + return -EINVAL; intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; - return true; + return 0; } static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, @@ -2095,7 +2085,7 @@ static const struct dpll bxt_dp_clk_val[] = { { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ }, }; -static bool +static int bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, struct dpll *clk_div) { @@ -2111,12 +2101,12 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n", crtc_state->port_clock, pipe_name(crtc->pipe)); - return false; + return -EINVAL; } drm_WARN_ON(&i915->drm, clk_div->m1 != 2); - return true; + return 0; } static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, @@ -2139,8 +2129,8 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, clk_div->dot != crtc_state->port_clock); } -static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, - const struct dpll *clk_div) +static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, + const struct dpll *clk_div) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state; @@ -2169,7 +2159,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, targ_cnt = 9; } else { drm_err(&i915->drm, "Invalid VCO\n"); - return false; + return -EINVAL; } if (clock > 270000) @@ -2206,10 +2196,10 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger; - return true; + return 0; } -static bool +static int bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { struct dpll clk_div = {}; @@ -2219,7 +2209,7 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state 
*crtc_state) return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } -static bool +static int bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { struct dpll clk_div = {}; @@ -2246,23 +2236,25 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock); } -static bool bxt_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int bxt_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id id; + int ret; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && - !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state)) - return false; - - if (intel_crtc_has_dp_encoder(crtc_state) && - !bxt_ddi_dp_set_dpll_hw_state(crtc_state)) - return false; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state); + else if (intel_crtc_has_dp_encoder(crtc_state)) + ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state); + else + ret = -EINVAL; + if (ret) + return ret; /* 1:1 mapping between ports and PLLs */ id = (enum intel_dpll_id) encoder->port; @@ -2276,7 +2268,7 @@ static bool bxt_get_dpll(struct intel_atomic_state *state, crtc_state->shared_dpll = pll; - return true; + return 0; } static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -2513,8 +2505,8 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { /* the following params are unused */ }; -static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, - struct skl_wrpll_params *pll_params) +static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *pll_params) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = @@ -2527,16 +2519,16 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) { if (clock == params[i].clock) { *pll_params = params[i].wrpll; - return true; + return 0; } } MISSING_CASE(clock); - return false; + return -EINVAL; } -static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, - struct skl_wrpll_params *pll_params) +static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *pll_params) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -2568,7 +2560,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, } } - return true; + return 0; } static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, @@ -2598,7 +2590,7 @@ static int icl_wrpll_ref_clock(struct drm_i915_private *i915) return ref_clock; } -static bool +static int icl_calc_wrpll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *wrpll_params) { @@ -2633,13 +2625,13 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state, } if (best_div == 0) - return false; + return -EINVAL; icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, kdiv); - return true; + return 0; } static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, @@ -2731,10 +2723,10 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, pll_state->div0 = 
TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val); } -static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, - u32 *target_dco_khz, - struct intel_dpll_hw_state *state, - bool is_dkl) +static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, + u32 *target_dco_khz, + struct intel_dpll_hw_state *state, + bool is_dkl) { static const u8 div1_vals[] = { 7, 5, 3, 2 }; u32 dco_min_freq, dco_max_freq; @@ -2800,19 +2792,19 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, hsdiv | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2); - return true; + return 0; } } - return false; + return -EINVAL; } /* * The specification for this function uses real numbers, so the math had to be * adapted to integer-only calculation, that's why it looks so different. */ -static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, - struct intel_dpll_hw_state *pll_state) +static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, + struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int refclk_khz = dev_priv->dpll.ref_clks.nssc; @@ -2826,14 +2818,16 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); bool is_dkl = DISPLAY_VER(dev_priv) >= 12; + int ret; memset(pll_state, 0, sizeof(*pll_state)); - if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, - pll_state, is_dkl)) { + ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, + pll_state, is_dkl); + if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to find divisors for clock %d\n", clock); - return false; + return ret; } m1div = 2; @@ -2848,7 +2842,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, drm_dbg_kms(&dev_priv->drm, "Failed to find mdiv for clock %d\n", clock); - return false; + return -EINVAL; } } m2div_rem = dco_khz % (refclk_khz * m1div); @@ -2875,7 +2869,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, break; default: MISSING_CASE(refclk_khz); - return false; + return -EINVAL; } /* @@ -3018,7 +3012,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; } - return true; + return 0; } static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, @@ -3140,9 +3134,9 @@ static u32 intel_get_hti_plls(struct drm_i915_private *i915) return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state); } -static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -3160,11 +3154,10 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, else ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); - if (!ret) { + if (ret) { drm_dbg_kms(&dev_priv->drm, "Could not calculate combo PHY PLL state.\n"); - - return false; + return ret; } icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); @@ -3209,7 +3202,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, drm_dbg_kms(&dev_priv->drm, "No combo PHY PLL found for [ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name); - return false; + return -EINVAL; } 
intel_reference_shared_dpll(state, crtc, @@ -3217,12 +3210,12 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, icl_update_active_dpll(state, crtc, encoder); - return true; + return 0; } -static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int icl_get_tc_phy_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = @@ -3230,12 +3223,14 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, struct skl_wrpll_params pll_params = { }; struct icl_port_dpll *port_dpll; enum intel_dpll_id dpll_id; + int ret; port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; - if (!icl_calc_tbt_pll(crtc_state, &pll_params)) { + ret = icl_calc_tbt_pll(crtc_state, &pll_params); + if (ret) { drm_dbg_kms(&dev_priv->drm, "Could not calculate TBT PLL state.\n"); - return false; + return ret; } icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); @@ -3245,14 +3240,15 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, BIT(DPLL_ID_ICL_TBTPLL)); if (!port_dpll->pll) { drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n"); - return false; + return -EINVAL; } intel_reference_shared_dpll(state, crtc, port_dpll->pll, &port_dpll->hw_state); port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; - if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) { + ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state); + if (ret) { drm_dbg_kms(&dev_priv->drm, "Could not calculate MG PHY PLL state.\n"); goto err_unreference_tbt_pll; @@ -3264,6 +3260,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, &port_dpll->hw_state, BIT(dpll_id)); if (!port_dpll->pll) { + ret = -EINVAL; drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n"); goto err_unreference_tbt_pll; } @@ -3272,18 +3269,18 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, icl_update_active_dpll(state, crtc, encoder); - return true; + return 0; err_unreference_tbt_pll: port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; intel_unreference_shared_dpll(state, crtc, port_dpll->pll); - return false; + return ret; } -static bool icl_get_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int icl_get_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); @@ -3295,7 +3292,7 @@ static bool icl_get_dplls(struct intel_atomic_state *state, MISSING_CASE(phy); - return false; + return -EINVAL; } static void icl_put_dplls(struct intel_atomic_state *state, @@ -4154,17 +4151,18 @@ void intel_shared_dpll_init(struct drm_device *dev) * intel_release_shared_dplls(). * * Returns: - * True if all required DPLLs were successfully reserved. + * 0 if all required DPLLs were successfully reserved, + * negative error code otherwise. 
*/ -bool intel_reserve_shared_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +int intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) - return false; + return -EINVAL; return dpll_mgr->get_dplls(state, crtc, encoder); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index ba2fdfce1579..499a43e39123 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -337,9 +337,9 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) -bool intel_reserve_shared_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder); +int intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc); void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, From 8e272b3af396d84d8da418b480e13964fd48f028 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:31:54 +0200 Subject: [PATCH 181/219] drm/i915: Pass dev_priv to intel_shared_dpll_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop passing around the drm_device and just pass the dev_priv instead. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-3-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 9 ++++----- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 3 +-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 0ddfce21a828..68aa7c10e9c1 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9665,7 +9665,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) } intel_plane_possible_crtcs_init(i915); - intel_shared_dpll_init(dev); + intel_shared_dpll_init(i915); intel_fdi_pll_freq_update(i915); intel_update_czclk(i915); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 1d52796333ab..dc3c889b0aa6 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -4078,13 +4078,12 @@ static const struct intel_dpll_mgr adlp_pll_mgr = { /** * intel_shared_dpll_init - Initialize shared DPLLs - * @dev: drm device + * @dev_priv: i915 device * - * Initialize shared DPLLs for @dev. + * Initialize shared DPLLs for @dev_priv. 
*/ -void intel_shared_dpll_init(struct drm_device *dev) +void intel_shared_dpll_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_dpll_mgr *dpll_mgr = NULL; const struct dpll_info *dpll_info; int i; @@ -4123,7 +4122,7 @@ void intel_shared_dpll_init(struct drm_device *dev) dpll_info = dpll_mgr->dpll_info; for (i = 0; dpll_info[i].name; i++) { - drm_WARN_ON(dev, i != dpll_info[i].id); + drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id); dev_priv->dpll.shared_dplls[i].info = &dpll_info[i]; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 499a43e39123..f7c96a1f13c8 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -37,7 +37,6 @@ __a > __b ? (__a - __b) : (__b - __a); }) enum tc_port; -struct drm_device; struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; @@ -356,7 +355,7 @@ bool intel_dpll_get_hw_state(struct drm_i915_private *i915, void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); -void intel_shared_dpll_init(struct drm_device *dev); +void intel_shared_dpll_init(struct drm_i915_private *dev_priv); void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv); void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv); void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv); From e87ba18b4c80cb4bb28a9a1b76edad79b34cb1fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:31:55 +0200 Subject: [PATCH 182/219] drm/i915: Remove pointless dpll_funcs checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All platforms have dpll_funcs. Remove the pointless NULL checks. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 68aa7c10e9c1..671e2a7ce889 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6958,14 +6958,10 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) static void intel_modeset_clear_plls(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; - if (!dev_priv->dpll_funcs) - return; - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state)) continue; From 7275f630d842222c093376246136e0cf1c657af2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:31:56 +0200 Subject: [PATCH 183/219] drm/i915: Adjust .crtc_compute_clock() calling convention MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the full atomic state+crtc rather than the redundant crtc+crtc_state pair. We already need the full atomic state in the hsw+ codepath anyway. 
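Reduced to a minimal before/after sketch (hypothetical hook name; the full conversion is in the diff below):

	/* before: hooks took the crtc_state and dug the crtc back out of it */
	static int foo_crtc_compute_clock(struct intel_crtc_state *crtc_state)
	{
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
		/* ... */
		return 0;
	}

	/* after: hooks take the atomic state + crtc and look up the new
	 * crtc_state, like the rest of the atomic code does */
	static int foo_crtc_compute_clock(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
	{
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);
		/* ... */
		return 0;
	}
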
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/display/intel_dpll.c | 83 ++++++++++++-------- drivers/gpu/drm/i915/display/intel_dpll.h | 4 +- 3 files changed, 53 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 671e2a7ce889..d3a0e2930301 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4906,7 +4906,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, if (mode_changed && crtc_state->hw.enable && !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { - ret = intel_dpll_crtc_compute_clock(crtc_state); + ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 1bd4a05dff7c..1c05ec167046 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -18,7 +18,8 @@ #include "vlv_sideband.h" struct intel_dpll_funcs { - int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state); + int (*crtc_compute_clock)(struct intel_atomic_state *state, + struct intel_crtc *crtc); }; struct intel_limit { @@ -759,8 +760,8 @@ chv_find_best_dpll(const struct intel_limit *limit, bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock) { - int refclk = 100000; const struct intel_limit *limit = &intel_limits_bxt; + int refclk = 100000; return chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, @@ -927,12 +928,12 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, crtc_state->dpll_hw_state.dpll = dpll; } -static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int hsw_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); int ret; @@ -1070,12 +1071,12 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, crtc_state->dpll_hw_state.dpll = dpll; } -static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int ilk_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 120000; int ret; @@ -1167,11 +1168,14 @@ void chv_compute_dpll(struct intel_crtc_state *crtc_state) (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } -static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int chv_crtc_compute_clock(struct intel_atomic_state *state, + 
struct intel_crtc *crtc) { - int refclk = 100000; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit = &intel_limits_chv; - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + int refclk = 100000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -1188,11 +1192,14 @@ static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int vlv_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - int refclk = 100000; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit = &intel_limits_vlv; - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + int refclk = 100000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -1209,10 +1216,12 @@ static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int g4x_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; @@ -1255,10 +1264,12 @@ static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int pnv_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; @@ -1292,10 +1303,12 @@ static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; @@ -1329,10 +1342,12 @@ static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 48000; @@ -1400,12 
+1415,12 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = { .crtc_compute_clock = i8xx_crtc_compute_clock, }; -int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state) +int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); - return i915->dpll_funcs->crtc_compute_clock(crtc_state); + return i915->dpll_funcs->crtc_compute_clock(state, crtc); } void diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index 69b06a9e473e..e9731b2dd01c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -10,12 +10,14 @@ struct dpll; struct drm_i915_private; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; enum pipe; void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); -int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state); +int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc); int vlv_calc_dpll_params(int refclk, struct dpll *clock); int pnv_calc_dpll_params(int refclk, struct dpll *clock); int i9xx_calc_dpll_params(int refclk, struct dpll *clock); From 155a27172f823583a6a6d48ec0285baca0512cc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:31:57 +0200 Subject: [PATCH 184/219] drm/i915: Move stuff into intel_dpll_crtc_compute_clock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move some checks into intel_dpll_crtc_compute_clock() from the caller. Avoids the caller from having to worry about all this crap. We'll also reorder the hw.enable vs. shared_dpll checks since it makes sense to sanity check that we've cleared out the old shared_dpll even if the pipe is getting disabled. 
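Distilled, the wrapper ends up doing its sanity checks in this order (a sketch of the intent only; the hunk below is the authoritative code):

        /* only reached for modesets now */
        drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));

        /* the old shared DPLL must already be gone, even when disabling the pipe */
        if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
                return 0;

        /* nothing to compute for a pipe that won't be enabled */
        if (!crtc_state->hw.enable)
                return 0;

        return i915->dpll_funcs->crtc_compute_clock(state, crtc);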
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-6-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 3 +-- drivers/gpu/drm/i915/display/intel_dpll.c | 10 ++++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d3a0e2930301..d3aff02f2fa0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4904,8 +4904,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, mode_changed && !crtc_state->hw.active) crtc_state->update_wm_post = true; - if (mode_changed && crtc_state->hw.enable && - !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { + if (mode_changed) { ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 1c05ec167046..88d78a585304 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -1419,6 +1419,16 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); + + if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) + return 0; + + if (!crtc_state->hw.enable) + return 0; return i915->dpll_funcs->crtc_compute_clock(state, crtc); } From ad3da340f903b7f1d35a134144e8550a79d5a1db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:31:58 +0200 Subject: [PATCH 185/219] drm/i915: Move the dpll_hw_state clearing to intel_dpll_crtc_compute_clock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All .crtc_compute_clock() implementations do the same memset() to clear the dpll_hw_state (since we preserve it across intel_crtc_prepare_cleared_state()). Move the memset() to the common wrapper. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-7-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_dpll.c | 24 +++---------------- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 15 ------------ 2 files changed, 3 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 88d78a585304..494a343850e7 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -1081,9 +1081,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 120000; int ret; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
*/ if (!crtc_state->has_pch_encoder) return 0; @@ -1177,9 +1174,6 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state, const struct intel_limit *limit = &intel_limits_chv; int refclk = 100000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (!crtc_state->clock_set && !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { @@ -1201,9 +1195,6 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state, const struct intel_limit *limit = &intel_limits_vlv; int refclk = 100000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (!crtc_state->clock_set && !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { @@ -1225,9 +1216,6 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, const struct intel_limit *limit; int refclk = 96000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1273,9 +1261,6 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, const struct intel_limit *limit; int refclk = 96000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1312,9 +1297,6 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, const struct intel_limit *limit; int refclk = 96000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1351,9 +1333,6 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, const struct intel_limit *limit; int refclk = 48000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1430,6 +1409,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, if (!crtc_state->hw.enable) return 0; + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + return i915->dpll_funcs->crtc_compute_clock(state, crtc); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index dc3c889b0aa6..22f55574a35c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -1068,9 +1068,6 @@ static int hsw_get_dpll(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll = NULL; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) pll = hsw_ddi_wrpll_get_dpll(state, crtc); else if (intel_crtc_has_dp_encoder(crtc_state)) @@ -1595,9 +1592,6 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - crtc_state->dpll_hw_state.ctrl1 = ctrl1; crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; @@ -1708,9 +1702,6 @@ skl_ddi_dp_set_dpll_hw_state(struct 
intel_crtc_state *crtc_state) break; } - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - crtc_state->dpll_hw_state.ctrl1 = ctrl1; return 0;
@@ -2139,8 +2130,6 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, u32 prop_coef, int_coef, gain_ctl, targ_cnt; u32 lanestagger; - memset(dpll_hw_state, 0, sizeof(*dpll_hw_state)); - if (vco >= 6200000 && vco <= 6700000) { prop_coef = 4; int_coef = 9;
@@ -2701,8 +2690,6 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, { u32 dco_fraction = pll_params->dco_fraction; - memset(pll_state, 0, sizeof(*pll_state)); - if (ehl_combo_pll_div_frac_wa_needed(i915)) dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
@@ -2820,8 +2807,6 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, bool is_dkl = DISPLAY_VER(dev_priv) >= 12; int ret; - memset(pll_state, 0, sizeof(*pll_state)); - ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, pll_state, is_dkl); if (ret) {
From e2f5f399af316204656caf072d60ac19f4c978fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:31:59 +0200 Subject: [PATCH 186/219] drm/i915: Clear the dpll_hw_state when disabling a pipe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit
Clear the dpll_hw_state when we're about to disable the pipe. Previously it looks like we just left the old junk in there.
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-8-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_dpll.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 494a343850e7..7960f1d52eaa 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1406,12 +1406,12 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) return 0; - if (!crtc_state->hw.enable) - return 0; - memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); + if (!crtc_state->hw.enable) + return 0; + return i915->dpll_funcs->crtc_compute_clock(state, crtc); }
From 3bb9e25767ba786f240c419cc1db39d01232fa0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:32:00 +0200 Subject: [PATCH 187/219] drm/i915: Split out dg2_crtc_compute_clock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit
DG2 doesn't currently use the shared_dpll stuff so let's just split it out from hsw_crtc_compute_clock() entirely.
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-9-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_dpll.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 7960f1d52eaa..bc59efe18e89 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -938,9 +938,6 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state, intel_get_crtc_new_encoder(state, crtc_state); int ret; - if (IS_DG2(dev_priv)) - return intel_mpllb_calc_state(crtc_state, encoder); - if (DISPLAY_VER(dev_priv) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0;
@@ -956,6 +953,17 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state, return 0; } +static int dg2_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_encoder *encoder = + intel_get_crtc_new_encoder(state, crtc_state); + + return intel_mpllb_calc_state(crtc_state, encoder); +} + static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) { return dpll->m < factor * dpll->n;
@@ -1362,6 +1370,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, return 0; } +static const struct intel_dpll_funcs dg2_dpll_funcs = { + .crtc_compute_clock = dg2_crtc_compute_clock, +}; + static const struct intel_dpll_funcs hsw_dpll_funcs = { .crtc_compute_clock = hsw_crtc_compute_clock, };
@@ -1418,7 +1430,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { - if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) + if (IS_DG2(dev_priv)) + dev_priv->dpll_funcs = &dg2_dpll_funcs; + else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) dev_priv->dpll_funcs = &hsw_dpll_funcs; else if (HAS_PCH_SPLIT(dev_priv)) dev_priv->dpll_funcs = &ilk_dpll_funcs;
From a8e85faaa51fc9cada6ea272cbf60d6ad880eddf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 25 Mar 2022 14:32:01 +0200 Subject: [PATCH 188/219] drm/i915: Add crtc .crtc_get_shared_dpll() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit
Start splitting the .crtc_compute_clock() into two parts; one part does the computation, the second part does the shared dpll assignment. I want to move the actual computation part much earlier into the compute_config() phase.
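The caller in intel_crtc_atomic_check() then becomes a simple two-step sequence (a sketch of the intended usage, matching the intel_display.c hunk below):

        if (mode_changed) {
                /* step 1: pure per-crtc clock/PLL state computation */
                ret = intel_dpll_crtc_compute_clock(state, crtc);
                if (ret)
                        return ret;

                /* step 2: reserve/assign a shared DPLL for that state */
                ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
                if (ret)
                        return ret;
        }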
v2: dg2_crtc_get_shared_dpll() not needed (Jani) Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220325123205.22140-10-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 4 ++ drivers/gpu/drm/i915/display/intel_dpll.c | 47 +++++++++++++++++++- drivers/gpu/drm/i915/display/intel_dpll.h | 2 + 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d3aff02f2fa0..3266310dd9ef 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4908,6 +4908,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; + + ret = intel_dpll_crtc_get_shared_dpll(state, crtc); + if (ret) + return ret; } /* diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index bc59efe18e89..6eef0b8a91eb 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -20,6 +20,8 @@ struct intel_dpll_funcs { int (*crtc_compute_clock)(struct intel_atomic_state *state, struct intel_crtc *crtc); + int (*crtc_get_shared_dpll)(struct intel_atomic_state *state, + struct intel_crtc *crtc); }; struct intel_limit { @@ -930,6 +932,12 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, static int hsw_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) +{ + return 0; +} + +static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = @@ -1087,7 +1095,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 120000; - int ret; /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ if (!crtc_state->has_pch_encoder) @@ -1127,6 +1134,21 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, ilk_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); + return 0; +} + +static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + int ret; + + /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
*/ + if (!crtc_state->has_pch_encoder) + return 0; + ret = intel_reserve_shared_dplls(state, crtc, NULL); if (ret) { drm_dbg_kms(&dev_priv->drm,
@@ -1376,10 +1398,12 @@ static const struct intel_dpll_funcs dg2_dpll_funcs = { static const struct intel_dpll_funcs hsw_dpll_funcs = { .crtc_compute_clock = hsw_crtc_compute_clock, + .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll, }; static const struct intel_dpll_funcs ilk_dpll_funcs = { .crtc_compute_clock = ilk_crtc_compute_clock, + .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll, }; static const struct intel_dpll_funcs chv_dpll_funcs = {
@@ -1427,6 +1451,27 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, return i915->dpll_funcs->crtc_compute_clock(state, crtc); } +int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); + + if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) + return 0; + + if (!crtc_state->hw.enable) + return 0; + + if (!i915->dpll_funcs->crtc_get_shared_dpll) + return 0; + + return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc); +} + void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) {
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index e9731b2dd01c..bbc30542f29f 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -18,6 +18,8 @@ enum pipe; void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc); +int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc); int vlv_calc_dpll_params(int refclk, struct dpll *clock); int pnv_calc_dpll_params(int refclk, struct dpll *clock); int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
From 5e9ae5c47052e28a31fb4f55a6e735c28d4c3948 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Mon, 25 Apr 2022 18:03:31 -0400 Subject: [PATCH 189/219] drm/i915/gvt: Add missing symbol export.
When CONFIG_DRM_I915_DEBUG_RUNTIME and CONFIG_DRM_I915_DEBUG_PM are enabled, two more symbols in i915 are required to be exported.
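For reference, these exports live in the I915_GVT symbol namespace, so a consumer module has to import that namespace before the symbols resolve; a minimal sketch (gvt_example_use() is a made-up name, the runtime PM calls are the real i915 API):

        /* in the module that consumes the namespaced exports, e.g. the GVT-g side */
        MODULE_IMPORT_NS(I915_GVT);

        static void gvt_example_use(struct drm_i915_private *i915)
        {
                intel_wakeref_t wakeref;

                wakeref = intel_runtime_pm_get(&i915->runtime_pm);
                /* ... poke the hardware ... */
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        }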
Cc: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220425220331.24865-1-zhi.a.wang@intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_gvt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 7c03d975069e..24bc693439e8 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -309,6 +309,7 @@ EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); @@ -316,3 +317,4 @@ EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT); EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT); EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT); EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT); From eddbb074ce6a3af5fe1d9c546e5d474ca9fa799d Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Sat, 9 Apr 2022 01:46:29 +0300 Subject: [PATCH 190/219] drm/i915/dp: Add workaround for spurious AUX timeouts/hotplugs on LTTPR links To avoid AUX timeouts and subsequent spurious hotplug interrupts, make sure that the first DPCD access during detection is a read from an LTTPR register. Some ADLP DP link configuration at least with multiple LTTPRs expects the first DPCD access during the LTTPR/DPCD detection after hotplug to be a read from the LTTPR range starting with DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV. The side effect of this read is to put each LTTPR into the LTTPR transparent or LTTPR non-transparent mode. The lack of the above read may leave some of the LTTPRs in non-LTTPR mode, while other LTTPRs in LTTPR transparent or LTTPR non-transparent mode (for instance LTTPRs after system suspend/resume that kept their mode from before suspend). Due to the different AUX timeouts the different modes imply, the DPCD access from a non-LTTPR range will timeout and lead to an LTTPR generated hotplug towards the source (which the LTTPR firmware uses to account for buggy TypeC adapters with a long wake-up delay). SYSCROS: 72939 v2: Keep DPCD read-out working on non-LTTPR platforms. v3: Summarize what and why the patch does at the beginning of the commit log. (Jani) Signed-off-by: Imre Deak Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220408224629.845887-1-imre.deak@intel.com --- .../drm/i915/display/intel_dp_link_training.c | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 26f9e2b748e4..9feaf1a589f3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -82,19 +82,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); int ret; - if (intel_dp_is_edp(intel_dp)) - return false; - - /* - * Detecting LTTPRs must be avoided on platforms with an AUX timeout - * period < 3.2ms. 
(see DP Standard v2.0, 2.11.2, 3.6.6.1). - */ - if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915)) - return false; - ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd, intel_dp->lttpr_common_caps); if (ret < 0) @@ -197,13 +186,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI */ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) { - u8 dpcd[DP_RECEIVER_CAP_SIZE]; - int lttpr_count; + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int lttpr_count = 0; - if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) - return -EIO; + /* + * Detecting LTTPRs must be avoided on platforms with an AUX timeout + * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). + */ + if (!intel_dp_is_edp(intel_dp) && + (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) { + u8 dpcd[DP_RECEIVER_CAP_SIZE]; - lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd); + if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) + return -EIO; + + if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) + return -EIO; + + lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd); + } /* * The DPTX shall read the DPRX caps after LTTPR detection, so re-read From 1bc4ae0ccbc661b58d3e8f7b633c7786eaf7929f Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 25 Apr 2022 14:12:50 -0700 Subject: [PATCH 191/219] drm/i915: Add first set of DG2 PCI IDs The IDs added here are the subset reserved for 'motherboard down' designs of DG2. We have all the necessary support upstream to enable these now (although they'll continue to require force_probe until the usual requirements are met). The remaining DG2 IDs for add-in cards will come in a future patch once some additional required functionality has fully landed. Bspec: 44477 Cc: Lucas De Marchi Cc: Daniel Vetter Cc: Dave Airlie Cc: Rodrigo Vivi Cc: Joonas Lahtinen Cc: Jani Nikula Cc: Tvrtko Ursulin Signed-off-by: Matt Roper Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20220425211251.77154-3-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 2 +- drivers/gpu/drm/i915/intel_device_info.c | 21 +++++++++++++++++++++ include/drm/i915_pciids.h | 22 ++++++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index f59e526b03fc..c88e454a74bb 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1056,7 +1056,6 @@ static const struct intel_device_info xehpsdv_info = { BIT(VECS0) | BIT(VECS1) | \ BIT(VCS0) | BIT(VCS2) -__maybe_unused static const struct intel_device_info dg2_info = { DG2_FEATURES, XE_LPD_FEATURES, @@ -1152,6 +1151,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_DG1_IDS(&dg1_info), INTEL_RPLS_IDS(&adl_s_info), INTEL_RPLP_IDS(&adl_p_info), + INTEL_DG2_IDS(&dg2_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 74c3ffb66b8d..cefa9ed784ff 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -186,6 +186,18 @@ static const u16 subplatform_rpl_ids[] = { INTEL_RPLP_IDS(0), }; +static const u16 subplatform_g10_ids[] = { + INTEL_DG2_G10_IDS(0), +}; + +static const u16 subplatform_g11_ids[] = { + INTEL_DG2_G11_IDS(0), +}; + +static const u16 subplatform_g12_ids[] = { + INTEL_DG2_G12_IDS(0), +}; + static bool find_devid(u16 id, const u16 *p, unsigned int num) { for (; num; num--, 
p++) { @@ -231,6 +243,15 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_rpl_ids, ARRAY_SIZE(subplatform_rpl_ids))) { mask = BIT(INTEL_SUBPLATFORM_RPL); + } else if (find_devid(devid, subplatform_g10_ids, + ARRAY_SIZE(subplatform_g10_ids))) { + mask = BIT(INTEL_SUBPLATFORM_G10); + } else if (find_devid(devid, subplatform_g11_ids, + ARRAY_SIZE(subplatform_g11_ids))) { + mask = BIT(INTEL_SUBPLATFORM_G11); + } else if (find_devid(devid, subplatform_g12_ids, + ARRAY_SIZE(subplatform_g12_ids))) { + mask = BIT(INTEL_SUBPLATFORM_G12); } GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index a7b5eea7ffaa..283dadfbb4db 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -692,4 +692,26 @@ INTEL_VGA_DEVICE(0xA7A8, info), \ INTEL_VGA_DEVICE(0xA7A9, info) +/* DG2 */ +#define INTEL_DG2_G10_IDS(info) \ + INTEL_VGA_DEVICE(0x5690, info), \ + INTEL_VGA_DEVICE(0x5691, info), \ + INTEL_VGA_DEVICE(0x5692, info) + +#define INTEL_DG2_G11_IDS(info) \ + INTEL_VGA_DEVICE(0x5693, info), \ + INTEL_VGA_DEVICE(0x5694, info), \ + INTEL_VGA_DEVICE(0x5695, info), \ + INTEL_VGA_DEVICE(0x56B0, info) + +#define INTEL_DG2_G12_IDS(info) \ + INTEL_VGA_DEVICE(0x5696, info), \ + INTEL_VGA_DEVICE(0x5697, info), \ + INTEL_VGA_DEVICE(0x56B2, info) + +#define INTEL_DG2_IDS(info) \ + INTEL_DG2_G10_IDS(info), \ + INTEL_DG2_G11_IDS(info), \ + INTEL_DG2_G12_IDS(info) + #endif /* _I915_PCIIDS_H */ From fa630c304b934bee63d437010fb3cbca55c8ee83 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Wed, 27 Apr 2022 17:28:48 -0400 Subject: [PATCH 192/219] drm/i915/gvt: Make intel_gvt_match_device() static After the refactor of GVT-g, the reference of intel_gvt_match_device() only happens in handlers.c. Make it static to let the compiler be happy. Fixes: e0f74ed4634d ("i915/gvt: Separate the MMIO tracking table from GVT-g") Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Robert Beckett Cc: Lucas De Marchi Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220427212849.18109-1-zhi.a.wang@intel.com Reviewed-by: Cc: Jani Nikula --- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b7bab95da3c5..3ce1a5a9e2da 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -71,7 +71,7 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return 0; } -bool intel_gvt_match_device(struct intel_gvt *gvt, +static bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device) { return intel_gvt_get_device_type(gvt) & device; From 5b95b9d58fb0d7418c3d2d020099db789f66e7a1 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Wed, 27 Apr 2022 17:28:49 -0400 Subject: [PATCH 193/219] drm/i915/gvt: Fix the compiling error when CONFIG_DRM_I915_DEBUG_RUNTIME_PM=n A compiling error was reported when CONFIG_DRM_I915_DEBUG_RUNTIME_PM=n. Fix the problem by using the pre-defined macro. 
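The guard is needed because with CONFIG_DRM_I915_DEBUG_RUNTIME_PM=n there is no out-of-line intel_runtime_pm_put() to export at all; presumably the header then provides a static inline instead, along these lines (a sketch of the usual pattern, not the literal i915 header):

        #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
        void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
        #else
        static inline void intel_runtime_pm_put(struct intel_runtime_pm *rpm,
                                                intel_wakeref_t wref)
        {
                /* no wakeref tracking in this config, use the unchecked variant */
                intel_runtime_pm_put_unchecked(rpm);
        }
        #endif

EXPORT_SYMBOL_NS_GPL() on a symbol that only exists as a static inline cannot build, hence wrapping the export in the same IS_ENABLED() check.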
Cc: Jason Gunthorpe Cc: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220427212849.18109-2-zhi.a.wang@intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_gvt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 24bc693439e8..e98b6d69a91a 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -309,7 +309,9 @@ EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT); +#endif EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); From 419f8299ddad6070a6c95aaedf78e50265871f36 Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Wed, 27 Apr 2022 19:54:56 +0800 Subject: [PATCH 194/219] i915/gvt: Fix NULL pointer dereference in init_mmio_block_handlers Fix following coccicheck error: ./drivers/gpu/drm/i915/gvt/handlers.c:2925:35-41: ERROR: block is NULL but dereferenced. Use gvt->mmio.mmio_block instead of block to avoid NULL pointer dereference when find_mmio_block returns NULL. Fixes: e0f74ed4634d ("i915/gvt: Separate the MMIO tracking table from GVT-g") Signed-off-by: Wan Jiabing Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220427115457.836729-1-wanjiabing@vivo.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 3ce1a5a9e2da..ad31640c9bf2 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2916,7 +2916,7 @@ static int init_mmio_block_handlers(struct intel_gvt *gvt) block = find_mmio_block(gvt, VGT_PVINFO_PAGE); if (!block) { WARN(1, "fail to assign handlers to mmio block %x\n", - i915_mmio_reg_offset(block->offset)); + i915_mmio_reg_offset(gvt->mmio.mmio_block->offset)); return -ENODEV; } From c140915c00c92e3ca2a4f8e5748f0b9ef3e5a418 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 29 Apr 2022 17:21:40 +0300 Subject: [PATCH 195/219] drm/i915: move tons of power well initializers to rodata MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using compound literals for initialization can be tricky. Lacking a const qualifier, they won't end up in rodata, which is probably not expected or intended. Add const to move a whopping 136 initializers to rodata. Compare: $ objdump --syms drivers/gpu/drm/i915/display/intel_display_power_map.o | grep "\.rodata.*__compound_literal" $ objdump --syms drivers/gpu/drm/i915/display/intel_display_power_map.o | grep "\.data.*__compound_literal" Before and after the change. 
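The underlying C rule is easy to demonstrate in isolation: a compound literal is an unnamed object of exactly the type you spell, so without a const qualifier it is a writable object and typically lands in .data, while a const-qualified one is eligible for .rodata. A standalone illustration (not i915 code):

        /* cl.c -- build with: gcc -c cl.c && objdump -t cl.o */
        struct limits { int min, max; };

        /* unnamed *mutable* object -> ends up in .data */
        const struct limits *writable_lit = &(struct limits){ 0, 10 };

        /* const-qualified compound literal -> can be placed in .rodata */
        const struct limits *readonly_lit = &(const struct limits){ 0, 10 };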
Fixes: c32ffce42aa5 ("drm/i915: Convert the power well descriptor domain mask to an array of domains") Fixes: 4a845ff0c0d4 ("drm/i915: Simplify power well definitions by adding power well instances") Cc: Imre Deak Cc: Jouni Högander Signed-off-by: Jani Nikula Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20220429142140.2671828-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display_power_map.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index af6f54a26a35..97b367f39f35 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -21,7 +21,7 @@ #define I915_PW_DOMAINS(...) \ (const struct i915_power_domain_list) \ - __LIST(__LIST_INLINE_ELEMS(enum intel_display_power_domain, __VA_ARGS__)) + __LIST(__LIST_INLINE_ELEMS(const enum intel_display_power_domain, __VA_ARGS__)) #define I915_DECL_PW_DOMAINS(__name, ...) \ static const struct i915_power_domain_list __name = I915_PW_DOMAINS(__VA_ARGS__) @@ -32,7 +32,7 @@ #define I915_PW_INSTANCES(...) \ (const struct i915_power_well_instance_list) \ - __LIST(__LIST_INLINE_ELEMS(struct i915_power_well_instance, __VA_ARGS__)) + __LIST(__LIST_INLINE_ELEMS(const struct i915_power_well_instance, __VA_ARGS__)) #define I915_PW(_name, _domain_list, ...) \ { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ } From e0602d3a13e230fd5fb00236f7dc355f1447d8cb Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 3 May 2022 11:21:34 +0300 Subject: [PATCH 196/219] drm/i915: warn about missing ->get_buf_trans initialization Make sure each DDI platform has sane ->get_buf_trans initialized. Suggested-by: Matt Roper Cc: Matt Roper Signed-off-by: Jani Nikula Reviewed-by: Arun R Murthy Link: https://patchwork.freedesktop.org/patch/msgid/20220503082134.4128355-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 94e64661b4fd..85f58dd3df72 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -1673,7 +1673,9 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder) encoder->get_buf_trans = skl_get_buf_trans; } else if (IS_BROADWELL(i915)) { encoder->get_buf_trans = bdw_get_buf_trans; - } else { + } else if (IS_HASWELL(i915)) { encoder->get_buf_trans = hsw_get_buf_trans; + } else { + MISSING_CASE(INTEL_INFO(i915)->platform); } } From bb7acf59a11e013b5d548215918df9eb27dd35b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 18 Feb 2022 12:03:59 +0200 Subject: [PATCH 197/219] drm/i915: Use drm_mode_init() for on-stack modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initialize on-stack modes with drm_mode_init() to guarantee no stack garbage in the list head, or that we aren't copying over another mode's list head. Based on the following cocci script, with manual fixups: @decl@ identifier M; expression E; @@ - struct drm_display_mode M = E; + struct drm_display_mode M; @@ identifier decl.M; expression decl.E; statement S, S1; @@ struct drm_display_mode M; ... 
when != S + drm_mode_init(&M, &E); + S1 @@ expression decl.E; @@ - &*E + E
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220218100403.7028-19-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3266310dd9ef..9f105250f474 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -6902,8 +6902,9 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct drm_display_mode adjusted_mode = - crtc_state->hw.adjusted_mode; + struct drm_display_mode adjusted_mode; + + drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); if (crtc_state->vrr.enable) { adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
From 13367132a7ad2e29d58d55f7755fb1844db5f362 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:32 +0300 Subject: [PATCH 198/219] drm/i915/bios: Reorder panel DTD parsing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit
Reorder things so that we can parse the entire LFP data block in one go. For now we just stick to parsing the DTD from it. Also fix the misleading comment about block 42 being deprecated. Only the DTD part is deprecated, the rest is still very much needed.
v2: Move the version check+comment into parse_generic_dtd() (Jani)
Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 64 ++++++++++++----------- 1 file changed, 34 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index bc195769dd07..0f1c9820a2bd 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -488,25 +488,16 @@ parse_panel_options(struct drm_i915_private *i915) } } -/* Try to find integrated panel timing data */ static void -parse_lfp_panel_dtd(struct drm_i915_private *i915) +parse_lfp_panel_dtd(struct drm_i915_private *i915, + const struct bdb_lvds_lfp_data *lvds_lfp_data, + const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs) { - const struct bdb_lvds_lfp_data *lvds_lfp_data; - const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; const struct lvds_dvo_timing *panel_dvo_timing; const struct lvds_fp_timing *fp_timing; struct drm_display_mode *panel_fixed_mode; int panel_type = i915->vbt.panel_type; - lvds_lfp_data = find_section(i915, BDB_LVDS_LFP_DATA); - if (!lvds_lfp_data) - return; - - lvds_lfp_data_ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); - if (!lvds_lfp_data_ptrs) - return; - panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, panel_type);
@@ -537,6 +528,24 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915) } } +static void +parse_lfp_data(struct drm_i915_private *i915) +{ + const struct bdb_lvds_lfp_data *data; + const struct bdb_lvds_lfp_data_ptrs *ptrs; + + ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); + if (!ptrs) + return; + + data = find_section(i915, BDB_LVDS_LFP_DATA); + if (!data) + return; + + if (!i915->vbt.lfp_lvds_vbt_mode) +
parse_lfp_panel_dtd(i915, data, ptrs); +} + static void parse_generic_dtd(struct drm_i915_private *i915) { @@ -545,6 +554,17 @@ parse_generic_dtd(struct drm_i915_private *i915) struct drm_display_mode *panel_fixed_mode; int num_dtd; + /* + * Older VBTs provided DTD information for internal displays through + * the "LFP panel tables" block (42). As of VBT revision 229 the + * DTD information should be provided via a newer "generic DTD" + * block (58). Just to be safe, we'll try the new generic DTD block + * first on VBT >= 229, but still fall back to trying the old LFP + * block if that fails. + */ + if (i915->vbt.version < 229) + return; + generic_dtd = find_section(i915, BDB_GENERIC_DTD); if (!generic_dtd) return; @@ -615,23 +635,6 @@ parse_generic_dtd(struct drm_i915_private *i915) i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; } -static void -parse_panel_dtd(struct drm_i915_private *i915) -{ - /* - * Older VBTs provided provided DTD information for internal displays - * through the "LFP panel DTD" block (42). As of VBT revision 229, - * that block is now deprecated and DTD information should be provided - * via a newer "generic DTD" block (58). Just to be safe, we'll - * try the new generic DTD block first on VBT >= 229, but still fall - * back to trying the old LFP block if that fails. - */ - if (i915->vbt.version >= 229) - parse_generic_dtd(i915); - if (!i915->vbt.lfp_lvds_vbt_mode) - parse_lfp_panel_dtd(i915); -} - static void parse_lfp_backlight(struct drm_i915_private *i915) { @@ -2708,7 +2711,8 @@ void intel_bios_init(struct drm_i915_private *i915) parse_general_features(i915); parse_general_definitions(i915); parse_panel_options(i915); - parse_panel_dtd(i915); + parse_generic_dtd(i915); + parse_lfp_data(i915); parse_lfp_backlight(i915); parse_sdvo_panel_data(i915); parse_driver_features(i915); From a87d0a84760726445dcc0f0177623f0d683f3559 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:33 +0300 Subject: [PATCH 199/219] drm/i915/bios: Generate LFP data table pointers if the VBT lacks them MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Modern VBTs no longer contain the LFP data table pointers block (41). We are expecting to have one in order to be able to parse the LFP data block (42), so let's make one up. Since the fp_timing table has variable size we must somehow determine its size. Rather than just hardcode it we look for the terminator bytes (0xffff) to figure out where each table entry starts. dvo_timing, panel_pnp_id, and panel_name are expected to have fixed size. This has been observed on various machines, eg. TGL with BDB version 240, CML with BDB version 231, etc. The most recent VBT I've observed that still had block 41 had BDB version 228. So presumably the cutoff (if an exact cutoff even exists) is somewhere around BDB version 229-231. 
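As a worked illustration with made-up offsets: if the first 0xffff terminator sits 0x2a bytes into the block, fp_timing is taken to be 0x2a + 2 = 0x2c bytes; if the second terminator turns up 0x48 bytes after the first, each of the 16 per-panel entries spans 0x48 bytes, laid out roughly like this:

        /*
         * Hypothetical per-panel entry, stride 0x48 (numbers invented for the
         * example, the struct sizes are whatever the VBT definitions say):
         *
         *   fp_timing     0x00..0x2b   variable size, ends with 0xff 0xff
         *   dvo_timing    0x2c..0x3d   sizeof(struct lvds_dvo_timing)
         *   panel_pnp_id  0x3e..0x47   sizeof(struct lvds_pnp_id)
         */

The fixed-size dvo_timing and panel_pnp_id tables are carved off the end of that span, and the generated pointer block is only kept if the three sizes tile the stride exactly.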
v2: kfree the thing we allocated, not the thing+3 bytes v3: Do the debugprint only if we found the LFP data block v4: Fix t0 null check (Jani) Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 134 +++++++++++++++++++++- 1 file changed, 133 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 0f1c9820a2bd..4cea9c27b198 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -310,16 +310,144 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block) return validate_lfp_data_ptrs(bdb, ptrs); } +static const void *find_fp_timing_terminator(const u8 *data, int size) +{ + int i; + + for (i = 0; i < size - 1; i++) { + if (data[i] == 0xff && data[i+1] == 0xff) + return &data[i]; + } + + return NULL; +} + +static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table, + int table_size, int total_size) +{ + if (total_size < table_size) + return total_size; + + table->table_size = table_size; + table->offset = total_size - table_size; + + return total_size - table_size; +} + +static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next, + const struct lvds_lfp_data_ptr_table *prev, + int size) +{ + next->table_size = prev->table_size; + next->offset = prev->offset + size; +} + +static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, + const void *bdb) +{ + int i, size, table_size, block_size, offset; + const void *t0, *t1, *block; + struct bdb_lvds_lfp_data_ptrs *ptrs; + void *ptrs_block; + + block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); + if (!block) + return NULL; + + drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n"); + + block_size = get_blocksize(block); + + size = block_size; + t0 = find_fp_timing_terminator(block, size); + if (!t0) + return NULL; + + size -= t0 - block - 2; + t1 = find_fp_timing_terminator(t0 + 2, size); + if (!t1) + return NULL; + + size = t1 - t0; + if (size * 16 > block_size) + return NULL; + + ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL); + if (!ptrs_block) + return NULL; + + *(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS; + *(u16 *)(ptrs_block + 1) = sizeof(*ptrs); + ptrs = ptrs_block + 3; + + table_size = sizeof(struct lvds_pnp_id); + size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size); + + table_size = sizeof(struct lvds_dvo_timing); + size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size); + + table_size = t0 - block + 2; + size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size); + + if (ptrs->ptr[0].fp_timing.table_size) + ptrs->lvds_entries++; + if (ptrs->ptr[0].dvo_timing.table_size) + ptrs->lvds_entries++; + if (ptrs->ptr[0].panel_pnp_id.table_size) + ptrs->lvds_entries++; + + if (size != 0 || ptrs->lvds_entries != 3) { + kfree(ptrs); + return NULL; + } + + size = t1 - t0; + for (i = 1; i < 16; i++) { + next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size); + next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size); + next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size); + } + + size = t1 - t0; + table_size = sizeof(struct lvds_lfp_panel_name); + + if (16 * (size + table_size) <= block_size) { + ptrs->panel_name.table_size = table_size; + ptrs->panel_name.offset = size * 16; + } + + offset = block - bdb; + + 
for (i = 0; i < 16; i++) { + ptrs->ptr[i].fp_timing.offset += offset; + ptrs->ptr[i].dvo_timing.offset += offset; + ptrs->ptr[i].panel_pnp_id.offset += offset; + } + + if (ptrs->panel_name.table_size) + ptrs->panel_name.offset += offset; + + return ptrs_block; +} + static void init_bdb_block(struct drm_i915_private *i915, const void *bdb, enum bdb_block_id section_id, size_t min_size) { struct bdb_block_entry *entry; + void *temp_block = NULL; const void *block; size_t block_size; block = find_raw_section(bdb, section_id); + + /* Modern VBTs lack the LFP data table pointers block, make one up */ + if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) { + temp_block = generate_lfp_data_ptrs(i915, bdb); + if (temp_block) + block = temp_block + 3; + } if (!block) return; @@ -330,12 +458,16 @@ init_bdb_block(struct drm_i915_private *i915, entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3), GFP_KERNEL); - if (!entry) + if (!entry) { + kfree(temp_block); return; + } entry->section_id = section_id; memcpy(entry->data, block - 3, block_size + 3); + kfree(temp_block); + drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n", section_id, block_size, min_size); From 901a0cad2ab8f6edcb3f21f88037eab54c48fd1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:34 +0300 Subject: [PATCH 200/219] drm/i915/bios: Get access to the tail end of the LFP data block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to start parsing stuff from the tail end of the LFP data block. This is made awkward by the fact that the fp_timing table has variable size. So we must use a bit more finesse to get the tail end, and to make sure we allocate enough memory for it to make sure our struct representation fits. v2: Rebase due to the preallocation of BDB blocks v3: Rebase due to min_size WARN relocation v4: Document BDB_LVDS_LFP_DATA vs. BDB_LVDS_LFP_DATA_PTRS order (Jani) Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-4-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 43 ++++++++++++++++++- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 17 ++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 4cea9c27b198..4d7953e74814 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -185,10 +185,14 @@ static const struct { .min_size = sizeof(struct bdb_edp), }, { .section_id = BDB_LVDS_OPTIONS, .min_size = sizeof(struct bdb_lvds_options), }, + /* + * BDB_LVDS_LFP_DATA depends on BDB_LVDS_LFP_DATA_PTRS, + * so keep the two ordered. 
+ */ { .section_id = BDB_LVDS_LFP_DATA_PTRS, .min_size = sizeof(struct bdb_lvds_lfp_data_ptrs), }, { .section_id = BDB_LVDS_LFP_DATA, - .min_size = sizeof(struct bdb_lvds_lfp_data), }, + .min_size = 0, /* special case */ }, { .section_id = BDB_LVDS_BACKLIGHT, .min_size = sizeof(struct bdb_lfp_backlight_data), }, { .section_id = BDB_LFP_POWER, @@ -203,6 +207,23 @@ static const struct { .min_size = sizeof(struct bdb_generic_dtd), }, }; +static size_t lfp_data_min_size(struct drm_i915_private *i915) +{ + const struct bdb_lvds_lfp_data_ptrs *ptrs; + size_t size; + + ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); + if (!ptrs) + return 0; + + size = sizeof(struct bdb_lvds_lfp_data); + if (ptrs->panel_name.table_size) + size = max(size, ptrs->panel_name.offset + + sizeof(struct bdb_lvds_lfp_data_tail)); + + return size; +} + static bool validate_lfp_data_ptrs(const void *bdb, const struct bdb_lvds_lfp_data_ptrs *ptrs) { @@ -490,6 +511,9 @@ static void init_bdb_blocks(struct drm_i915_private *i915, enum bdb_block_id section_id = bdb_blocks[i].section_id; size_t min_size = bdb_blocks[i].min_size; + if (section_id == BDB_LVDS_LFP_DATA) + min_size = lfp_data_min_size(i915); + init_bdb_block(i915, bdb, section_id, min_size); } } @@ -560,6 +584,16 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data, return (const void *)data + ptrs->ptr[index].fp_timing.offset; } +static const struct bdb_lvds_lfp_data_tail * +get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, + const struct bdb_lvds_lfp_data_ptrs *ptrs) +{ + if (ptrs->panel_name.table_size) + return (const void *)data + ptrs->panel_name.offset; + else + return NULL; +} + /* Parse general panel options */ static void parse_panel_options(struct drm_i915_private *i915) @@ -664,6 +698,7 @@ static void parse_lfp_data(struct drm_i915_private *i915) { const struct bdb_lvds_lfp_data *data; + const struct bdb_lvds_lfp_data_tail *tail; const struct bdb_lvds_lfp_data_ptrs *ptrs; ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); @@ -676,6 +711,12 @@ parse_lfp_data(struct drm_i915_private *i915) if (!i915->vbt.lfp_lvds_vbt_mode) parse_lfp_panel_dtd(i915, data, ptrs); + + tail = get_lfp_data_tail(data, ptrs); + if (!tail) + return; + + (void)tail; } static void diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index e4a11c3e3f3e..2554f4b9b4da 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -783,6 +783,23 @@ struct lvds_lfp_panel_name { u8 name[13]; } __packed; +struct lvds_lfp_black_border { + u8 top; /* 227 */ + u8 bottom; /* 227 */ + u8 left; /* 238 */ + u8 right; /* 238 */ +} __packed; + +struct bdb_lvds_lfp_data_tail { + struct lvds_lfp_panel_name panel_name[16]; /* 156-163? 
*/ + u16 scaling_enable; /* 187 */ + u8 seamless_drrs_min_refresh_rate[16]; /* 188 */ + u8 pixel_overlap_count[16]; /* 208 */ + struct lvds_lfp_black_border black_border[16]; /* 227 */ + u16 dual_lfp_port_sync_enable; /* 231 */ + u16 gpu_dithering_for_banding_artifacts; /* 245 */ +} __packed; + /* * Block 43 - LFP Backlight Control Data Block */ From 9adf7d4186772ab9fc9fd68d03b1dd2a81a6a36b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:35 +0300 Subject: [PATCH 201/219] drm/i915/bios: Document the mess around the LFP data tables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document the fact that struct lvds_lfp_data_entry can't be used directly and instead must be accessed via the data table pointers. Also remove the bogus comment implying that there might be a variable number of panel entries in the table. There are always exactly 16. Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-5-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 2554f4b9b4da..4b98bab3b890 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -735,7 +735,7 @@ struct lvds_lfp_data_ptr { } __packed; struct bdb_lvds_lfp_data_ptrs { - u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ + u8 lvds_entries; struct lvds_lfp_data_ptr ptr[16]; struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */ } __packed; @@ -769,6 +769,11 @@ struct lvds_pnp_id { u8 mfg_year; } __packed; +/* + * For reference only. fp_timing has variable size so + * the data must be accessed using the data table pointers. + * Do not use this directly! + */ struct lvds_lfp_data_entry { struct lvds_fp_timing fp_timing; struct lvds_dvo_timing dvo_timing; From 4d1b21605d3922c18ceea7fb610a0e17cf7700f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:36 +0300 Subject: [PATCH 202/219] drm/i915/bios: Assume panel_type==0 if the VBT has bogus data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just assume panel_type==0 always if the VBT gives us bogus data. We actually already do this everywhere else except in parse_panel_options() since we just leave i915->vbt.panel_type zeroed. This also seems to be what Windows does. 
Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-6-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 4d7953e74814..bbf49e98fe02 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -618,13 +618,14 @@ parse_panel_options(struct drm_i915_private *i915) } else { if (lvds_options->panel_type > 0xf) { drm_dbg_kms(&i915->drm, - "Invalid VBT panel type 0x%x\n", + "Invalid VBT panel type 0x%x, assuming 0\n", lvds_options->panel_type); - return; + panel_type = 0; + } else { + panel_type = lvds_options->panel_type; + drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n", + panel_type); } - panel_type = lvds_options->panel_type; - drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n", - panel_type); } i915->vbt.panel_type = panel_type; From 719f4c51e2c9198bbe1e5e194d7467ac0413cfbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:37 +0300 Subject: [PATCH 203/219] drm/i915/bios: Extract get_panel_type() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pull the code to determine the panel type into its own set of sane functions. v2: rebase Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-7-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 58 +++++++++++++++-------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index bbf49e98fe02..5248ca42e543 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -594,6 +594,44 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, return NULL; } +static int vbt_get_panel_type(struct drm_i915_private *i915) +{ + const struct bdb_lvds_options *lvds_options; + + lvds_options = find_section(i915, BDB_LVDS_OPTIONS); + if (!lvds_options) + return -1; + + if (lvds_options->panel_type > 0xf) { + drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n", + lvds_options->panel_type); + return -1; + } + + return lvds_options->panel_type; +} + +static int get_panel_type(struct drm_i915_private *i915) +{ + int ret; + + ret = intel_opregion_get_panel_type(i915); + if (ret >= 0) { + drm_WARN_ON(&i915->drm, ret > 0xf); + drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n", ret); + return ret; + } + + ret = vbt_get_panel_type(i915); + if (ret >= 0) { + drm_WARN_ON(&i915->drm, ret > 0xf); + drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n", ret); + return ret; + } + + return 0; /* fallback */ +} + /* Parse general panel options */ static void parse_panel_options(struct drm_i915_private *i915) @@ -601,7 +639,6 @@ parse_panel_options(struct drm_i915_private *i915) const struct bdb_lvds_options *lvds_options; int panel_type; int drrs_mode; - int ret; lvds_options = find_section(i915, BDB_LVDS_OPTIONS); if (!lvds_options) @@ -609,24 +646,7 @@ parse_panel_options(struct drm_i915_private *i915) i915->vbt.lvds_dither = lvds_options->pixel_dither; - ret = intel_opregion_get_panel_type(i915); - if (ret >= 0) { - drm_WARN_ON(&i915->drm, ret > 0xf); - panel_type = ret; - drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n", - panel_type); - 
} else { - if (lvds_options->panel_type > 0xf) { - drm_dbg_kms(&i915->drm, - "Invalid VBT panel type 0x%x, assuming 0\n", - lvds_options->panel_type); - panel_type = 0; - } else { - panel_type = lvds_options->panel_type; - drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n", - panel_type); - } - } + panel_type = get_panel_type(i915); i915->vbt.panel_type = panel_type; From cc589f2deeb4160fb21cd74c25d6779082d9d561 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:38 +0300 Subject: [PATCH 204/219] drm/i915/bios: Refactor panel_type code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the panel type code a bit more abstract along the lines of the source of the panel type. For the moment we have three classes: OpRegion, VBT, fallback. Well introduce another one shortly. We can now also print out all the different panel types, and indicate which one we ultimately selected. Could help with debugging. v2: Add .get_panel_type() vfunc (Jani) Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-8-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 66 ++++++++++++++++++----- 1 file changed, 53 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 5248ca42e543..92de16fade2e 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -594,6 +594,11 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, return NULL; } +static int opregion_get_panel_type(struct drm_i915_private *i915) +{ + return intel_opregion_get_panel_type(i915); +} + static int vbt_get_panel_type(struct drm_i915_private *i915) { const struct bdb_lvds_options *lvds_options; @@ -611,25 +616,60 @@ static int vbt_get_panel_type(struct drm_i915_private *i915) return lvds_options->panel_type; } +static int fallback_get_panel_type(struct drm_i915_private *i915) +{ + return 0; +} + +enum panel_type { + PANEL_TYPE_OPREGION, + PANEL_TYPE_VBT, + PANEL_TYPE_FALLBACK, +}; + static int get_panel_type(struct drm_i915_private *i915) { - int ret; + struct { + const char *name; + int (*get_panel_type)(struct drm_i915_private *i915); + int panel_type; + } panel_types[] = { + [PANEL_TYPE_OPREGION] = { + .name = "OpRegion", + .get_panel_type = opregion_get_panel_type, + }, + [PANEL_TYPE_VBT] = { + .name = "VBT", + .get_panel_type = vbt_get_panel_type, + }, + [PANEL_TYPE_FALLBACK] = { + .name = "fallback", + .get_panel_type = fallback_get_panel_type, + }, + }; + int i; - ret = intel_opregion_get_panel_type(i915); - if (ret >= 0) { - drm_WARN_ON(&i915->drm, ret > 0xf); - drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n", ret); - return ret; + for (i = 0; i < ARRAY_SIZE(panel_types); i++) { + panel_types[i].panel_type = panel_types[i].get_panel_type(i915); + + drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf); + + if (panel_types[i].panel_type >= 0) + drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n", + panel_types[i].name, panel_types[i].panel_type); } - ret = vbt_get_panel_type(i915); - if (ret >= 0) { - drm_WARN_ON(&i915->drm, ret > 0xf); - drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n", ret); - return ret; - } + if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0) + i = PANEL_TYPE_OPREGION; + else if (panel_types[PANEL_TYPE_VBT].panel_type >= 0) + i = PANEL_TYPE_VBT; + else + i = PANEL_TYPE_FALLBACK; - return 0; /* fallback 
*/ + drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n", + panel_types[i].name, panel_types[i].panel_type); + + return panel_types[i].panel_type; } /* Parse general panel options */ From 790b45f1bc6736a8dd48ba5731b6871e0217311e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:39 +0300 Subject: [PATCH 205/219] drm/i915/bios: Parse the seamless DRRS min refresh rate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extract the seamless DRRS min refresh rate from the VBT. v2: Do a version check Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-9-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 9 ++++++++- drivers/gpu/drm/i915/i915_drv.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 92de16fade2e..f7f49a03a2e4 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -761,6 +761,7 @@ parse_lfp_data(struct drm_i915_private *i915) const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_tail *tail; const struct bdb_lvds_lfp_data_ptrs *ptrs; + int panel_type = i915->vbt.panel_type; ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) @@ -777,7 +778,13 @@ parse_lfp_data(struct drm_i915_private *i915) if (!tail) return; - (void)tail; + if (i915->vbt.version >= 188) { + i915->vbt.seamless_drrs_min_refresh_rate = + tail->seamless_drrs_min_refresh_rate[panel_type]; + drm_dbg_kms(&i915->drm, + "Seamless DRRS min refresh rate: %d Hz\n", + i915->vbt.seamless_drrs_min_refresh_rate); + } } static void diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 806d893cac01..d5ba3e1af603 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -326,6 +326,7 @@ struct intel_vbt_data { bool override_afc_startup; u8 override_afc_startup_val; + u8 seamless_drrs_min_refresh_rate; enum drrs_type drrs_type; struct { From 949665a6e237a6fd49ff207e3876d71b20b7e9f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 4 May 2022 18:04:40 +0300 Subject: [PATCH 206/219] drm/i915: Respect VBT seamless DRRS min refresh rate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure our choice of downclock mode respects the VBT seameless DRRS min refresh rate limit. 
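For context only (this sketch is not part of the patch; the helper name and the refresh-rate values below are invented), the rule implemented further down in intel_panel_downclock_mode() amounts to: among the panel's fixed modes, pick the lowest refresh rate that is still at or above the VBT minimum and below the currently used rate. A minimal stand-alone C version:

    #include <stdio.h>

    /* Toy stand-in for the panel's fixed-mode list: just each mode's vrefresh. */
    static const int fixed_mode_vrefresh[] = { 40, 48, 60 };

    /*
     * Pick the lowest refresh rate that is >= the VBT minimum and < the
     * currently used refresh rate; 0 means no suitable downclock mode.
     */
    static int pick_downclock_vrefresh(int min_vrefresh, int cur_vrefresh)
    {
        int best = 0, max = cur_vrefresh;
        unsigned int i;

        for (i = 0; i < sizeof(fixed_mode_vrefresh) / sizeof(fixed_mode_vrefresh[0]); i++) {
            int vrefresh = fixed_mode_vrefresh[i];

            if (vrefresh >= min_vrefresh && vrefresh < max) {
                max = vrefresh;
                best = vrefresh;
            }
        }

        return best;
    }

    int main(void)
    {
        /* With a 45 Hz VBT limit the 40 Hz mode is skipped and 48 Hz wins. */
        printf("downclock: %d Hz\n", pick_downclock_vrefresh(45, 60));
        return 0;
    }

With the VBT minimum left at 0 this reduces to the previous behaviour of simply taking the lowest available refresh rate below the current one.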
v2: s/vrefesh/vrefresh/ (Jani) Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220504150440.13748-10-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_panel.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 03398feb6676..d1d1b59102d6 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -75,13 +75,17 @@ const struct drm_display_mode * intel_panel_downclock_mode(struct intel_connector *connector, const struct drm_display_mode *adjusted_mode) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *fixed_mode, *best_mode = NULL; - int vrefresh = drm_mode_vrefresh(adjusted_mode); + int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate; + int max_vrefresh = drm_mode_vrefresh(adjusted_mode); /* pick the fixed_mode with the lowest refresh rate */ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { - if (drm_mode_vrefresh(fixed_mode) < vrefresh) { - vrefresh = drm_mode_vrefresh(fixed_mode); + int vrefresh = drm_mode_vrefresh(fixed_mode); + + if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) { + max_vrefresh = vrefresh; best_mode = fixed_mode; } } From 1b93ff4d0679190e8812cd0d0b3aebfcba1ed883 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 4 May 2022 21:37:14 +0300 Subject: [PATCH 207/219] drm/i915: remove unused GEM_DEBUG_DECL() and GEM_DEBUG_BUG_ON() There are already too many choices here, take away the unused ones. Cc: Tvrtko Ursulin Signed-off-by: Jani Nikula Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20220504183716.987793-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_gem.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index d0752e5553db..b7b257f54d2e 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -54,9 +54,7 @@ struct drm_i915_private; } while(0) #define GEM_WARN_ON(expr) WARN_ON(expr) -#define GEM_DEBUG_DECL(var) var #define GEM_DEBUG_EXEC(expr) expr -#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr) #define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr) #else @@ -66,9 +64,7 @@ struct drm_i915_private; #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); }) -#define GEM_DEBUG_DECL(var) #define GEM_DEBUG_EXEC(expr) do { } while (0) -#define GEM_DEBUG_BUG_ON(expr) #define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; }) #endif From e9794c88cd6cf4be4a79188916a75539751f532c Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 4 May 2022 21:37:15 +0300 Subject: [PATCH 208/219] drm/i915: remove single-use GEM_DEBUG_EXEC() Reduce the magic of what's going on in GEM_DEBUG_EXEC() by expanding it inline and being explicit about it. It's as single use case anyway, so the macro feels overkill. 
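Purely as an illustration of the pattern being adopted (not part of the patch; DEMO_DEBUG_GEM below is a stand-in for CONFIG_DRM_I915_DEBUG_GEM and the buffer is made up), open-coding a debug-only statement behind a compile-time constant keeps the condition visible at the call site while still letting the compiler drop it in non-debug builds:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for CONFIG_DRM_I915_DEBUG_GEM; flip to 0 for a "release" build. */
    #define DEMO_DEBUG_GEM 1

    int main(void)
    {
        unsigned int ring[8];

        memset(ring, 0, sizeof(ring));

        /*
         * Open-coded equivalent of the removed GEM_DEBUG_EXEC(expr) wrapper:
         * the condition is visible at the call site, and the compiler still
         * eliminates the statement entirely when the constant is 0.
         */
        if (DEMO_DEBUG_GEM)
            memset(ring, 0x5a, sizeof(ring));  /* poison, like POISON_INUSE */

        printf("ring[0] = 0x%08x\n", ring[0]);
        return 0;
    }

The kernel expresses the same thing with IS_ENABLED(), as the hunk below shows.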
Cc: Tvrtko Ursulin Signed-off-by: Jani Nikula Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20220504183716.987793-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/gt/intel_ring.c | 3 ++- drivers/gpu/drm/i915/i915_gem.h | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 40ffcb94e379..15ec64d881c4 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -299,7 +299,8 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) GEM_BUG_ON(ring->emit > ring->size - bytes); GEM_BUG_ON(ring->space < bytes); cs = ring->vaddr + ring->emit; - GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + memset32(cs, POISON_INUSE, bytes / sizeof(*cs)); ring->emit += bytes; ring->space -= bytes; diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index b7b257f54d2e..a2be323a4be5 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -54,7 +54,6 @@ struct drm_i915_private; } while(0) #define GEM_WARN_ON(expr) WARN_ON(expr) -#define GEM_DEBUG_EXEC(expr) expr #define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr) #else @@ -64,7 +63,6 @@ struct drm_i915_private; #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); }) -#define GEM_DEBUG_EXEC(expr) do { } while (0) #define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; }) #endif From ef83e1198f9f7d7db0031c839bb1112cfee45b42 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Wed, 4 May 2022 13:22:12 -0700 Subject: [PATCH 209/219] drm/i915/dmc: Load DMC on DG2 Add Support for DC states on Dg2. v2: Add dc9 as the max supported DC states and disable DC5. v3: set max_dc to 0. 
(Imre) Cc: Imre Deak Cc: Rodrigo Vivi Signed-off-by: Anusha Srivatsa Reviewed-by: Rodrigo Vivi (v1) Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20220504202213.740200-2-anusha.srivatsa@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 4 +++- drivers/gpu/drm/i915/display/intel_dmc.c | 10 +++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 1d9bd5808849..15b15f434fcf 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -907,7 +907,9 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, if (!HAS_DISPLAY(dev_priv)) return 0; - if (IS_DG1(dev_priv)) + if (IS_DG2(dev_priv)) + max_dc = 0; + else if (IS_DG1(dev_priv)) max_dc = 3; else if (DISPLAY_VER(dev_priv) >= 12) max_dc = 4; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 257cf662f9f4..2f01aca4d981 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -52,6 +52,10 @@ #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06) +#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06) +MODULE_FIRMWARE(DG2_DMC_PATH); + #define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16) #define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16) MODULE_FIRMWARE(ADLP_DMC_PATH); @@ -688,7 +692,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) */ intel_dmc_runtime_pm_get(dev_priv); - if (IS_ALDERLAKE_P(dev_priv)) { + if (IS_DG2(dev_priv)) { + dmc->fw_path = DG2_DMC_PATH; + dmc->required_version = DG2_DMC_VERSION_REQUIRED; + dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; + } else if (IS_ALDERLAKE_P(dev_priv)) { dmc->fw_path = ADLP_DMC_PATH; dmc->required_version = ADLP_DMC_VERSION_REQUIRED; dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; From 7ecc3cc8a7b39f08eee9aea7b718187583342a70 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 10 May 2022 14:49:57 +0300 Subject: [PATCH 210/219] drm/i915: Fix 'mixing different enum types' warnings in intel_display_power.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following sparse warnings: drivers/gpu/drm/i915/display/intel_display_power.c:2431:34: warning: mixing different enum types: drivers/gpu/drm/i915/display/intel_display_power.c:2431:34: unsigned int enum intel_display_power_domain drivers/gpu/drm/i915/display/intel_display_power.c:2431:34: int enum port drivers/gpu/drm/i915/display/intel_display_power.c:2442:37: warning: mixing different enum types: drivers/gpu/drm/i915/display/intel_display_power.c:2442:37: unsigned int enum intel_display_power_domain drivers/gpu/drm/i915/display/intel_display_power.c:2442:37: int enum port drivers/gpu/drm/i915/display/intel_display_power.c:2468:43: warning: mixing different enum types: drivers/gpu/drm/i915/display/intel_display_power.c:2468:43: unsigned int enum intel_display_power_domain drivers/gpu/drm/i915/display/intel_display_power.c:2468:43: unsigned int enum aux_ch drivers/gpu/drm/i915/display/intel_display_power.c:2479:35: warning: mixing different enum types: drivers/gpu/drm/i915/display/intel_display_power.c:2479:35: unsigned int enum intel_display_power_domain drivers/gpu/drm/i915/display/intel_display_power.c:2479:35: unsigned int enum aux_ch Fixes: 979e1b32e0e2 ("drm/i915: Sanitize the port -> DDI/AUX 
power domain mapping for each platform") Reported-by: Jani Nikula Cc: Jouni Högander Signed-off-by: Imre Deak Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220510114957.406070-1-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 15b15f434fcf..5eb8d63fb89e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -2430,7 +2430,7 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID) return POWER_DOMAIN_PORT_DDI_IO_A; - return domains->ddi_io + port - domains->port_start; + return domains->ddi_io + (int)(port - domains->port_start); } enum intel_display_power_domain @@ -2441,7 +2441,7 @@ intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port po if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID) return POWER_DOMAIN_PORT_DDI_LANES_A; - return domains->ddi_lanes + port - domains->port_start; + return domains->ddi_lanes + (int)(port - domains->port_start); } static const struct intel_ddi_port_domains * @@ -2467,7 +2467,7 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID) return POWER_DOMAIN_AUX_A; - return domains->aux_legacy_usbc + aux_ch - domains->aux_ch_start; + return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); } enum intel_display_power_domain @@ -2478,5 +2478,5 @@ intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch au if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID) return POWER_DOMAIN_AUX_TBT1; - return domains->aux_tbt + aux_ch - domains->aux_ch_start; + return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); } From 21c47196aec3a93f913a7515e1e7b30e6c54d6c6 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 10 May 2022 17:08:47 -0700 Subject: [PATCH 211/219] drm/i915/dmc: Add MMIO range restrictions Bspec has added some steps that check forDMC MMIO range before programming them v2: Fix for CI v3: move register defines to .h (Anusha) - Check MMIO restrictions per pipe - Add MMIO restricton for v1 dmc header as well (Lucas) v4: s/_PICK/_PICK_EVEN and use it only for Pipe DMC scenario. 
- clean up sanity check logic.(Lucas) - Add MMIO range for RKL as well.(Anusha) v5: Use DISPLAY_VER instead of per platform check (Lucas) BSpec: 49193 Cc: stable@vger.kernel.org Cc: Lucas De Marchi Signed-off-by: Anusha Srivatsa Reviewed-by: Lucas De Marchi Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20220511000847.1068302-1-anusha.srivatsa@intel.com --- drivers/gpu/drm/i915/display/intel_dmc.c | 44 +++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dmc_regs.h | 18 +++++++- 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 2f01aca4d981..34d00f5aff25 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -378,6 +378,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, } } +static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, + const u32 *mmioaddr, u32 mmio_count, + int header_ver, u8 dmc_id) +{ + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + u32 start_range, end_range; + int i; + + if (dmc_id >= DMC_FW_MAX) { + drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id); + return false; + } + + if (header_ver == 1) { + start_range = DMC_MMIO_START_RANGE; + end_range = DMC_MMIO_END_RANGE; + } else if (dmc_id == DMC_FW_MAIN) { + start_range = TGL_MAIN_MMIO_START; + end_range = TGL_MAIN_MMIO_END; + } else if (DISPLAY_VER(i915) >= 13) { + start_range = ADLP_PIPE_MMIO_START; + end_range = ADLP_PIPE_MMIO_END; + } else if (DISPLAY_VER(i915) >= 12) { + start_range = TGL_PIPE_MMIO_START(dmc_id); + end_range = TGL_PIPE_MMIO_END(dmc_id); + } else { + drm_warn(&i915->drm, "Unknown mmio range for sanity check"); + return false; + } + + for (i = 0; i < mmio_count; i++) { + if (mmioaddr[i] < start_range || mmioaddr[i] > end_range) + return false; + } + + return true; +} + static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, size_t rem_size, u8 dmc_id) @@ -447,6 +485,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, return 0; } + if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count, + dmc_header->header_ver, dmc_id)) { + drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n"); + return 0; + } + for (i = 0; i < mmio_count; i++) { dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); dmc_info->mmiodata[i] = mmiodata[i]; diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h index d65e698832eb..67e14eb96a7a 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h @@ -16,7 +16,23 @@ #define DMC_LAST_WRITE _MMIO(0x8F034) #define DMC_LAST_WRITE_VALUE 0xc003b400 #define DMC_MMIO_START_RANGE 0x80000 -#define DMC_MMIO_END_RANGE 0x8FFFF +#define DMC_MMIO_END_RANGE 0x8FFFF +#define DMC_V1_MMIO_START_RANGE 0x80000 +#define TGL_MAIN_MMIO_START 0x8F000 +#define TGL_MAIN_MMIO_END 0x8FFFF +#define _TGL_PIPEA_MMIO_START 0x92000 +#define _TGL_PIPEA_MMIO_END 0x93FFF +#define _TGL_PIPEB_MMIO_START 0x96000 +#define _TGL_PIPEB_MMIO_END 0x97FFF +#define ADLP_PIPE_MMIO_START 0x5F000 +#define ADLP_PIPE_MMIO_END 0x5FFFF + +#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\ + _TGL_PIPEB_MMIO_START) + +#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\ + _TGL_PIPEB_MMIO_END) + #define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030) #define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C) 
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038) From 945ae909aa76f55ac8c9e95feb3683512d39134a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 12 May 2022 19:16:38 +0300 Subject: [PATCH 212/219] drm/i915/audio: fix audio code enable/disable pipe logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Need to use pipe_name(pipe) instead of pipe directly. Fixes: 1f31e35f2e88 ("drm/i915/audio: unify audio codec enable/disable debug logging") Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220512161638.272601-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_audio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 1c87bf66b092..f0f0dfce27ce 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -827,7 +827,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n", connector->base.id, connector->name, encoder->base.base.id, encoder->base.name, - pipe, drm_eld_size(connector->eld)); + pipe_name(pipe), drm_eld_size(connector->eld)); /* FIXME precompute the ELD in .compute_config() */ if (!connector->eld[0]) @@ -888,7 +888,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n", connector->base.id, connector->name, - encoder->base.base.id, encoder->base.name, pipe); + encoder->base.base.id, encoder->base.name, pipe_name(pipe)); if (dev_priv->audio.funcs) dev_priv->audio.funcs->audio_codec_disable(encoder, From c3e57159dea473e9e138d32d08c48f3103294050 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Wed, 11 May 2022 18:34:54 +0530 Subject: [PATCH 213/219] drm/i915: Use drm_dbg for rpm logging RPM suspend/resume also supported on gfx platforms which doesn't have kms support and even on platforms without any connected display panel. There is no good reason to log rpm suspend/resume debug message with drm_dbg_kms() therefore changing it to drm_dbg(). 
Signed-off-by: Anshuman Gupta Reviewed-by: Ashutosh Dixit Link: https://patchwork.freedesktop.org/patch/msgid/20220511130455.22028-1-anshuman.gupta@intel.com --- drivers/gpu/drm/i915/i915_driver.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 6a7f11ee0b9e..30a7f4381f4b 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -1547,7 +1547,7 @@ static int intel_runtime_suspend(struct device *kdev) if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; - drm_dbg_kms(&dev_priv->drm, "Suspending device\n"); + drm_dbg(&dev_priv->drm, "Suspending device\n"); disable_rpm_wakeref_asserts(rpm); @@ -1623,7 +1623,7 @@ static int intel_runtime_suspend(struct device *kdev) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_poll_enable(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Device suspended\n"); + drm_dbg(&dev_priv->drm, "Device suspended\n"); return 0; } @@ -1637,7 +1637,7 @@ static int intel_runtime_resume(struct device *kdev) if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; - drm_dbg_kms(&dev_priv->drm, "Resuming device\n"); + drm_dbg(&dev_priv->drm, "Resuming device\n"); drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count)); disable_rpm_wakeref_asserts(rpm); @@ -1681,7 +1681,7 @@ static int intel_runtime_resume(struct device *kdev) drm_err(&dev_priv->drm, "Runtime resume failed, disabling it (%d)\n", ret); else - drm_dbg_kms(&dev_priv->drm, "Device resumed\n"); + drm_dbg(&dev_priv->drm, "Device resumed\n"); return ret; } From 057a6a1936e79c0bc9c86537fb9886ed39cd078a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jouni=20H=C3=B6gander?= Date: Fri, 13 May 2022 17:28:10 +0300 Subject: [PATCH 214/219] drm/i915/psr: Use full update In case of area calculation fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we have some corner cases where area calculation fails. For these sel fetch area calculation ends up having update area as y1 = 0, y2 = 4. Instead of these values safer option is full update. One of such for example is big fb with offset. We don't have usable offset in psr2_sel_fetch_update. Currently it's open what is the proper way to fix this corner case. Use full update for now. 
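As a rough stand-alone sketch of the fallback being added (not the actual i915 code; the struct names and coordinates are invented), the selective-fetch area is accumulated with y1 == -1 meaning "nothing collected yet", and that same sentinel is what triggers the conservative full update:

    #include <stdbool.h>
    #include <stdio.h>

    struct rect { int y1, y2; };

    /* Grow the running damage union by one area; y1 == -1 means "still empty". */
    static void clip_area_update(struct rect *total, const struct rect *area)
    {
        if (total->y1 == -1) {
            *total = *area;
            return;
        }
        if (area->y1 < total->y1)
            total->y1 = area->y1;
        if (area->y2 > total->y2)
            total->y2 = area->y2;
    }

    int main(void)
    {
        struct rect pipe_clip = { .y1 = -1, .y2 = -1 };
        struct rect plane_damage = { .y1 = 64, .y2 = 128 };
        bool full_update = false;

        /* Normal case: a plane contributes a valid damage area. */
        clip_area_update(&pipe_clip, &plane_damage);
        printf("accumulated damage: y1=%d y2=%d\n", pipe_clip.y1, pipe_clip.y2);

        /* Corner case: pretend nothing was accumulated for this frame. */
        pipe_clip.y1 = -1;
        if (pipe_clip.y1 == -1) {
            printf("area calculation failed, falling back to full update\n");
            full_update = true;
        }

        printf("full_update = %s\n", full_update ? "yes" : "no");
        return 0;
    }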
v2: Commit message modified v3: Print out debug info once when area calculation fails v4: Use drm_info_once v5: pipeA -> "pipe %c", pipe_name(crtc-pipe) Cc: José Roberto de Souza Cc: Mika Kahola Signed-off-by: Jouni Högander Reviewed-by: José Roberto de Souza Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220513142811.779331-2-jouni.hogander@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 06db407e2749..fecdaaeac39e 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1685,6 +1685,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 }; struct intel_plane_state *new_plane_state, *old_plane_state; @@ -1770,6 +1771,19 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, clip_area_update(&pipe_clip, &damaged_area); } + /* + * TODO: For now we are just using full update in case + * selective fetch area calculation fails. To optimize this we + * should identify cases where this happens and fix the area + * calculation for those. + */ + if (pipe_clip.y1 == -1) { + drm_info_once(&dev_priv->drm, + "Selective fetch area calculation failed in pipe %c\n", + pipe_name(crtc->pipe)); + full_update = true; + } + if (full_update) goto skip_sel_fetch_set_loop; From d6774b8c3c5813aa541c9148f641d3d8d4b296d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jouni=20H=C3=B6gander?= Date: Fri, 13 May 2022 17:28:11 +0300 Subject: [PATCH 215/219] drm/i915: Ensure damage clip area is within pipe area MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Current update area calculation is not handling situation where e.g. cursor plane is fully or partially outside pipe area. Fix this by checking damage area against pipe_src area using drm_rect_intersect. 
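For illustration (the toy rect_intersect() below only mirrors the semantics of drm_rect_intersect() as used here, clipping the first rectangle in place and reporting whether anything is left; the coordinates are made up), clipping the per-plane damage against the pipe source area naturally discards planes that sit completely outside it:

    #include <stdbool.h>
    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    /* Toy version of drm_rect_intersect(): clip r1 to r2, report if non-empty. */
    static bool rect_intersect(struct rect *r1, const struct rect *r2)
    {
        if (r1->x1 < r2->x1) r1->x1 = r2->x1;
        if (r1->y1 < r2->y1) r1->y1 = r2->y1;
        if (r1->x2 > r2->x2) r1->x2 = r2->x2;
        if (r1->y2 > r2->y2) r1->y2 = r2->y2;
        return r1->x1 < r1->x2 && r1->y1 < r1->y2;
    }

    int main(void)
    {
        const struct rect pipe_src = { 0, 0, 1920, 1080 };

        /* Cursor partially below the bottom edge of the pipe. */
        struct rect partly_off = { 100, 1050, 356, 1306 };
        /* Cursor dragged completely off the visible area. */
        struct rect fully_off = { 100, 1200, 356, 1456 };

        if (rect_intersect(&partly_off, &pipe_src))
            printf("clipped damage: y1=%d y2=%d\n", partly_off.y1, partly_off.y2);

        if (!rect_intersect(&fully_off, &pipe_src))
            printf("no overlap with the pipe, damage ignored\n");

        return 0;
    }

This is why clip_area_update() can simply return early when the intersection is empty, as the hunk below does.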
v2: Set x1 and x2 in damaged_area initialization v3: Move drm_rect_intersect into clip_area_update v4: draw_area -> pipe_src Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5440 Cc: José Roberto de Souza Cc: Mika Kahola Reviewed-by: José Roberto de Souza Signed-off-by: Jouni Högander Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220513142811.779331-3-jouni.hogander@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index fecdaaeac39e..36356893c7ca 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1618,8 +1618,12 @@ exit: } static void clip_area_update(struct drm_rect *overlap_damage_area, - struct drm_rect *damage_area) + struct drm_rect *damage_area, + struct drm_rect *pipe_src) { + if (!drm_rect_intersect(damage_area, pipe_src)) + return; + if (overlap_damage_area->y1 == -1) { overlap_damage_area->y1 = damage_area->y1; overlap_damage_area->y2 = damage_area->y2; @@ -1709,7 +1713,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, */ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { - struct drm_rect src, damaged_area = { .y1 = -1 }; + struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1, + .x2 = INT_MAX }; struct drm_atomic_helper_damage_iter iter; struct drm_rect clip; @@ -1736,20 +1741,23 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (old_plane_state->uapi.visible) { damaged_area.y1 = old_plane_state->uapi.dst.y1; damaged_area.y2 = old_plane_state->uapi.dst.y2; - clip_area_update(&pipe_clip, &damaged_area); + clip_area_update(&pipe_clip, &damaged_area, + &crtc_state->pipe_src); } if (new_plane_state->uapi.visible) { damaged_area.y1 = new_plane_state->uapi.dst.y1; damaged_area.y2 = new_plane_state->uapi.dst.y2; - clip_area_update(&pipe_clip, &damaged_area); + clip_area_update(&pipe_clip, &damaged_area, + &crtc_state->pipe_src); } continue; } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) { /* If alpha changed mark the whole plane area as damaged */ damaged_area.y1 = new_plane_state->uapi.dst.y1; damaged_area.y2 = new_plane_state->uapi.dst.y2; - clip_area_update(&pipe_clip, &damaged_area); + clip_area_update(&pipe_clip, &damaged_area, + &crtc_state->pipe_src); continue; } @@ -1760,7 +1768,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, &new_plane_state->uapi); drm_atomic_for_each_plane_damage(&iter, &clip) { if (drm_rect_intersect(&clip, &src)) - clip_area_update(&damaged_area, &clip); + clip_area_update(&damaged_area, &clip, + &crtc_state->pipe_src); } if (damaged_area.y1 == -1) @@ -1768,7 +1777,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1; damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1; - clip_area_update(&pipe_clip, &damaged_area); + clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); } /* From 230fb39ff7e07bd0324c87acf08dd2c9b0bbcea8 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 18 May 2022 14:33:14 +0300 Subject: [PATCH 216/219] drm/i915/reg: fix undefined behavior due to shift overflowing the constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_GENMASK() and REG_FIELD_PREP() to avoid errors due to 
-fsanitize=shift. References: https://lore.kernel.org/r/20220405151517.29753-12-bp@alien8.de Reported-by: Borislav Petkov Reported-by: Ruiqi GONG Cc: Randy Dunlap Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220518113315.1305027-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c997bd604c49..9ad593810537 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7546,25 +7546,25 @@ enum skl_power_gate { #define _PORT_CLK_SEL_A 0x46100 #define _PORT_CLK_SEL_B 0x46104 #define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B) -#define PORT_CLK_SEL_LCPLL_2700 (0 << 29) -#define PORT_CLK_SEL_LCPLL_1350 (1 << 29) -#define PORT_CLK_SEL_LCPLL_810 (2 << 29) -#define PORT_CLK_SEL_SPLL (3 << 29) -#define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4) << 29) -#define PORT_CLK_SEL_WRPLL1 (4 << 29) -#define PORT_CLK_SEL_WRPLL2 (5 << 29) -#define PORT_CLK_SEL_NONE (7 << 29) -#define PORT_CLK_SEL_MASK (7 << 29) +#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29) +#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0) +#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1) +#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2) +#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3) +#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll)) +#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4) +#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5) +#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7) /* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */ #define DDI_CLK_SEL(port) PORT_CLK_SEL(port) -#define DDI_CLK_SEL_NONE (0x0 << 28) -#define DDI_CLK_SEL_MG (0x8 << 28) -#define DDI_CLK_SEL_TBT_162 (0xC << 28) -#define DDI_CLK_SEL_TBT_270 (0xD << 28) -#define DDI_CLK_SEL_TBT_540 (0xE << 28) -#define DDI_CLK_SEL_TBT_810 (0xF << 28) -#define DDI_CLK_SEL_MASK (0xF << 28) +#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28) +#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0) +#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8) +#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC) +#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD) +#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE) +#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF) /* Transcoder clock selection */ #define _TRANS_CLK_SEL_A 0x46140 From 85a040bc9049dd168d5e79a1fa9d2da87e6e52dc Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Thu, 19 May 2022 09:57:30 +0100 Subject: [PATCH 217/219] drm/i915: Introduce has_media_ratio_mode Media ratio mode (the ability for media IP to work at a different frequency from the GT) is available for a subset of dGfx platforms supporting GuC/SLPC. Introduce 'has_media_ratio_mode' flag in intel_device_info to identify these platforms and set it for XEHPSDV and DG2/ATS-M. 
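As a side note, a minimal sketch of the per-platform feature-flag pattern this follows (toy names only, not the real intel_device_info layout):

    #include <stdio.h>

    /* Toy capability table in the style of intel_device_info feature flags. */
    struct demo_device_info {
        const char *name;
        unsigned int has_media_ratio_mode:1;
    };

    #define DEMO_HAS_MEDIA_RATIO_MODE(info) ((info)->has_media_ratio_mode)

    static const struct demo_device_info demo_platforms[] = {
        { .name = "older-dgfx" },                             /* flag defaults to 0 */
        { .name = "newer-dgfx", .has_media_ratio_mode = 1 },  /* platform opts in */
    };

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < sizeof(demo_platforms) / sizeof(demo_platforms[0]); i++)
            printf("%s: media ratio mode %s\n", demo_platforms[i].name,
                   DEMO_HAS_MEDIA_RATIO_MODE(&demo_platforms[i]) ? "yes" : "no");
        return 0;
    }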
Signed-off-by: Ashutosh Dixit Reviewed-by: Rodrigo Vivi Reviewed-by: Andi Shyti Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20220519085732.1276255-1-tvrtko.ursulin@linux.intel.com [tursulin: fixup merge conflict] --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_pci.c | 2 ++ drivers/gpu/drm/i915/intel_device_info.h | 1 + 3 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d5ba3e1af603..8802ba39087b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1214,6 +1214,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define CCS_MASK(gt) \ ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS) +#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode) + /* * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution * All later gens can run the final buffer from the ppgtt diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index c88e454a74bb..1e5c40f36798 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1032,6 +1032,7 @@ static const struct intel_device_info xehpsdv_info = { .display = { }, .has_64k_pages = 1, .needs_compact_pt = 1, + .has_media_ratio_mode = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | @@ -1051,6 +1052,7 @@ static const struct intel_device_info xehpsdv_info = { .has_64k_pages = 1, \ .has_guc_deprivilege = 1, \ .needs_compact_pt = 1, \ + .has_media_ratio_mode = 1, \ .platform_engine_mask = \ BIT(RCS0) | BIT(BCS0) | \ BIT(VECS0) | BIT(VECS1) | \ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 4053efaa55da..eb3a37808e1d 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -151,6 +151,7 @@ enum intel_ppgtt_type { func(has_llc); \ func(has_logical_ring_contexts); \ func(has_logical_ring_elsq); \ + func(has_media_ratio_mode); \ func(has_mslices); \ func(has_pooled_eu); \ func(has_pxp); \ From ee421bb4cb9535f44015634baad833dcc98c9062 Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Thu, 19 May 2022 09:57:31 +0100 Subject: [PATCH 218/219] drm/i915/pcode: Extend pcode functions for multiple gt's Each gt contains an independent instance of pcode. Extend pcode functions to interface with pcode on different gt's. To avoid creating dependency of display functionality on intel_gt, pcode function interfaces are exposed in terms of uncore rather than intel_gt. Callers have been converted to pass in the appropritate (i915 or intel_gt) uncore to the pcode functions. 
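To illustrate the shape of the interface change (toy types only; the real functions are the snb_pcode_*()/skl_pcode_*() helpers converted in the diff below, and the mailbox value is made up), keying the helpers on an uncore pointer lets both display code, which only has the i915 device, and GT code, which has a per-gt uncore, reach the right pcode instance:

    #include <stdio.h>

    /* Toy model of the object graph: each GT has its own uncore and pcode. */
    struct demo_uncore { int pcode_id; };
    struct demo_gt { struct demo_uncore *uncore; };
    struct demo_i915 { struct demo_uncore uncore; struct demo_gt gt0, gt1; };

    /* Keyed on uncore, so display and GT code can both reach "their" pcode. */
    static void demo_pcode_read(struct demo_uncore *uncore, unsigned int mbox)
    {
        printf("pcode %d: read mbox %#x\n", uncore->pcode_id, mbox);
    }

    int main(void)
    {
        struct demo_uncore remote = { .pcode_id = 1 };
        struct demo_i915 i915 = { .uncore = { .pcode_id = 0 } };

        i915.gt0.uncore = &i915.uncore;  /* primary GT shares the main uncore */
        i915.gt1.uncore = &remote;       /* remote GT has its own instance */

        demo_pcode_read(&i915.uncore, 0x5d);     /* display-style caller */
        demo_pcode_read(i915.gt1.uncore, 0x5d);  /* gt-style caller */
        return 0;
    }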
v2: Expose pcode functions in terms of uncore rather than gt (Jani/Rodrigo) v3: Retain previous function names to eliminate needless #defines (Rodrigo) v4: Move out i915_pcode_init() to a separate patch (Tvrtko) Remove duplicated drm_err/drm_dbg from intel_pcode_init() (Tvrtko) Cc: Tvrtko Ursulin Cc: Jani Nikula Signed-off-by: Ashutosh Dixit Reviewed-by: Rodrigo Vivi Reviewed-by: Andi Shyti Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20220519085732.1276255-2-tvrtko.ursulin@linux.intel.com [tursulin: fixup merge conflict] --- drivers/gpu/drm/i915/display/hsw_ips.c | 4 +- drivers/gpu/drm/i915/display/intel_bw.c | 6 +- drivers/gpu/drm/i915/display/intel_cdclk.c | 16 ++--- .../drm/i915/display/intel_display_power.c | 2 +- .../i915/display/intel_display_power_well.c | 4 +- drivers/gpu/drm/i915/display/intel_hdcp.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 4 +- drivers/gpu/drm/i915/gt/intel_llc.c | 3 +- drivers/gpu/drm/i915/gt/intel_rc6.c | 4 +- drivers/gpu/drm/i915/gt/intel_rps.c | 5 +- drivers/gpu/drm/i915/gt/selftest_llc.c | 2 +- drivers/gpu/drm/i915/gt/selftest_rps.c | 2 +- drivers/gpu/drm/i915/i915_driver.c | 4 +- drivers/gpu/drm/i915/intel_dram.c | 2 +- drivers/gpu/drm/i915/intel_pcode.c | 69 ++++++++----------- drivers/gpu/drm/i915/intel_pcode.h | 14 ++-- drivers/gpu/drm/i915/intel_pm.c | 10 +-- 17 files changed, 71 insertions(+), 82 deletions(-) diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 38014e0cc9ad..861dcd2eb890 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -28,7 +28,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) if (IS_BROADWELL(i915)) { drm_WARN_ON(&i915->drm, - snb_pcode_write(i915, DISPLAY_IPS_CONTROL, + snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, IPS_ENABLE | IPS_PCODE_CONTROL)); /* * Quoting Art Runyan: "its not safe to expect any particular @@ -62,7 +62,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) if (IS_BROADWELL(i915)) { drm_WARN_ON(&i915->drm, - snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0)); + snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0)); /* * Wait for PCODE to finish disabling IPS. 
The BSpec specified * 42ms timeout value leads to occasional timeouts so use 100ms diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 37bd7b17f3d0..79269d2c476b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -78,7 +78,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, u16 dclk; int ret; - ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), &val, &val2); if (ret) @@ -104,7 +104,7 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, int ret; int i; - ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL); if (ret) return ret; @@ -123,7 +123,7 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, int ret; /* bspec says to keep retrying for at least 1 ms */ - ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, + ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, points_mask, ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK, ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE, diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index b2017d8161b4..6e80162632dd 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -800,7 +800,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, "trying to change cdclk frequency with cdclk not enabled\n")) return; - ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); + ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { drm_err(&dev_priv->drm, "failed to inform pcode about cdclk change\n"); @@ -828,7 +828,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); - snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, + snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, cdclk_config->voltage_level); intel_de_write(dev_priv, CDCLK_FREQ, @@ -1086,7 +1086,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, drm_WARN_ON_ONCE(&dev_priv->drm, IS_SKYLAKE(dev_priv) && vco == 8640000); - ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, + ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); @@ -1132,7 +1132,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, intel_de_posting_read(dev_priv, CDCLK_CTL); /* inform PCU of the change */ - snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); intel_update_cdclk(dev_priv); @@ -1702,7 +1702,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, /* Inform power controller of upcoming frequency change. 
*/ if (DISPLAY_VER(dev_priv) >= 11) - ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, + ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); @@ -1711,7 +1711,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * BSpec requires us to wait up to 150usec, but that leads to * timeouts; the 2ms used here is based on experiment. */ - ret = snb_pcode_write_timeout(dev_priv, + ret = snb_pcode_write_timeout(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000, 150, 2); if (ret) { @@ -1774,7 +1774,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); if (DISPLAY_VER(dev_priv) >= 11) { - ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); } else { /* @@ -1783,7 +1783,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * FIXME: Waiting for the request completion could be delayed * until the next PCODE request based on BSpec. */ - ret = snb_pcode_write_timeout(dev_priv, + ret = snb_pcode_write_timeout(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, cdclk_config->voltage_level, 150, 2); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 5eb8d63fb89e..fb17439bd4f8 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1196,7 +1196,7 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) { if (IS_HASWELL(dev_priv)) { - if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) + if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val)) drm_dbg_kms(&dev_priv->drm, "Failed to write to D_COMP\n"); } else { diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 5be18eb94042..91cfd5890f46 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -474,7 +474,7 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915) int ret, tries = 0; while (1) { - ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, + ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0, 250, 1); if (ret != -EAGAIN || ++tries == 3) break; @@ -1739,7 +1739,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block) * Spec states that we should timeout the request after 200us * but the function below will timeout after 500us */ - ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); + ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val); if (ret == 0) { if (block && (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 4de4c174a987..ed5dab8427d6 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -298,7 +298,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) * Mailbox interface. 
*/ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { - ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1); + ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { drm_err(&dev_priv->drm, "Failed to initiate HDCP key load (%d)\n", diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 437e96bb3b93..b3fa6607da7e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -138,7 +138,7 @@ static int gen6_drpc(struct seq_file *m) } if (GRAPHICS_VER(i915) <= 7) - snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); + snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); seq_printf(m, "RC1e Enabled: %s\n", str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); @@ -559,7 +559,7 @@ static int llc_show(struct seq_file *m, void *data) wakeref = intel_runtime_pm_get(gt->uncore->rpm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { ia_freq = gpu_freq; - snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE, &ia_freq, NULL); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", intel_gpu_freq(rps, diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c index 40e2e28ee6c7..14fe65812e42 100644 --- a/drivers/gpu/drm/i915/gt/intel_llc.c +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -124,7 +124,6 @@ static void calc_ia_freq(struct intel_llc *llc, static void gen6_update_ring_freq(struct intel_llc *llc) { - struct drm_i915_private *i915 = llc_to_gt(llc)->i915; struct ia_constants consts; unsigned int gpu_freq; @@ -142,7 +141,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc) unsigned int ia_freq, ring_freq; calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); - snb_pcode_write(i915, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, + snb_pcode_write(llc_to_gt(llc)->uncore, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | gpu_freq); diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 63db136cbc27..cc8bff220abc 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -271,7 +271,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_HW_ENABLE; rc6vids = 0; - ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); + ret = snb_pcode_read(rc6_to_gt(rc6)->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); if (GRAPHICS_VER(i915) == 6 && ret) { drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n"); } else if (GRAPHICS_VER(i915) == 6 && @@ -281,7 +281,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; rc6vids |= GEN6_ENCODE_RC6_VID(450); - ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); + ret = snb_pcode_write(rc6_to_gt(rc6)->uncore, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); if (ret) drm_err(&i915->drm, "Couldn't fix incorrect rc6 voltage\n"); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index a9c13b1a3018..ce61ceb07114 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1096,7 +1096,8 @@ static void gen6_rps_init(struct intel_rps *rps) IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { u32 ddcc_status = 0; - if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + if 
(snb_pcode_read(rps_to_gt(rps)->uncore, + HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status, NULL) == 0) rps->efficient_freq = clamp_t(u8, @@ -1947,7 +1948,7 @@ void intel_rps_init(struct intel_rps *rps) if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { u32 params = 0; - snb_pcode_read(i915, GEN6_READ_OC_PARAMS, ¶ms, NULL); + snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, ¶ms, NULL); if (params & BIT(31)) { /* OC supported */ drm_dbg(&i915->drm, "Overclocking supported, max: %dMHz, overclock: %dMHz\n", diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index 2cd184ab32b1..cfd736d88939 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -31,7 +31,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); val = gpu_freq; - if (snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + if (snb_pcode_read(llc_to_gt(llc)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE, &val, NULL)) { pr_err("Failed to read freq table[%d], range [%d, %d]\n", gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq); diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 6a69ac0184ad..cfb4708dd62e 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -521,7 +521,7 @@ static void show_pcu_config(struct intel_rps *rps) for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { int ia_freq = gpu_freq; - snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE, &ia_freq, NULL); pr_info("%5d %5d %5d\n", diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 30a7f4381f4b..419e237f9ea4 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -634,7 +634,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) intel_opregion_setup(dev_priv); - ret = intel_pcode_init(dev_priv); + ret = intel_pcode_init(&dev_priv->uncore); if (ret) goto err_msi; @@ -1249,7 +1249,7 @@ static int i915_drm_resume(struct drm_device *dev) disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - ret = intel_pcode_init(dev_priv); + ret = intel_pcode_init(&dev_priv->uncore); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 2b9e7833da96..437447119770 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -393,7 +393,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv) u32 val = 0; int ret; - ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c index ac727546868e..2be700932322 100644 --- a/drivers/gpu/drm/i915/intel_pcode.c +++ b/drivers/gpu/drm/i915/intel_pcode.c @@ -52,14 +52,12 @@ static int gen7_check_mailbox_status(u32 mbox) } } -static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox, +static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1, int fast_timeout_us, int slow_timeout_ms, bool is_read) { - struct intel_uncore *uncore = &i915->uncore; - - lockdep_assert_held(&i915->sb_lock); + lockdep_assert_held(&uncore->i915->sb_lock); 
/* * GEN6_PCODE_* are outside of the forcewake domain, we can use @@ -88,22 +86,22 @@ static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox, if (is_read && val1) *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1); - if (GRAPHICS_VER(i915) > 6) + if (GRAPHICS_VER(uncore->i915) > 6) return gen7_check_mailbox_status(mbox); else return gen6_check_mailbox_status(mbox); } -int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1) +int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1) { int err; - mutex_lock(&i915->sb_lock); - err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true); - mutex_unlock(&i915->sb_lock); + mutex_lock(&uncore->i915->sb_lock); + err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true); + mutex_unlock(&uncore->i915->sb_lock); if (err) { - drm_dbg(&i915->drm, + drm_dbg(&uncore->i915->drm, "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", mbox, __builtin_return_address(0), err); } @@ -111,18 +109,18 @@ int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1) return err; } -int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, +int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, int fast_timeout_us, int slow_timeout_ms) { int err; - mutex_lock(&i915->sb_lock); - err = __snb_pcode_rw(i915, mbox, &val, NULL, + mutex_lock(&uncore->i915->sb_lock); + err = __snb_pcode_rw(uncore, mbox, &val, NULL, fast_timeout_us, slow_timeout_ms, false); - mutex_unlock(&i915->sb_lock); + mutex_unlock(&uncore->i915->sb_lock); if (err) { - drm_dbg(&i915->drm, + drm_dbg(&uncore->i915->drm, "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", val, mbox, __builtin_return_address(0), err); } @@ -130,18 +128,18 @@ int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, return err; } -static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, +static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox, u32 request, u32 reply_mask, u32 reply, u32 *status) { - *status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true); + *status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true); return (*status == 0) && ((request & reply_mask) == reply); } /** * skl_pcode_request - send PCODE request until acknowledgment - * @i915: device private + * @uncore: uncore * @mbox: PCODE mailbox ID the request is targeted for * @request: request ID * @reply_mask: mask used to check for request acknowledgment @@ -158,16 +156,16 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some * other error as reported by PCODE. */ -int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, +int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request, u32 reply_mask, u32 reply, int timeout_base_ms) { u32 status; int ret; - mutex_lock(&i915->sb_lock); + mutex_lock(&uncore->i915->sb_lock); #define COND \ - skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status) + skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status) /* * Prime the PCODE by doing a request first. Normally it guarantees @@ -193,35 +191,26 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, * requests, and for any quirks of the PCODE firmware that delays * the request completion. 
*/ - drm_dbg_kms(&i915->drm, + drm_dbg_kms(&uncore->i915->drm, "PCODE timeout, retrying with preemption disabled\n"); - drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3); + drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3); preempt_disable(); ret = wait_for_atomic(COND, 50); preempt_enable(); out: - mutex_unlock(&i915->sb_lock); + mutex_unlock(&uncore->i915->sb_lock); return status ? status : ret; #undef COND } -int intel_pcode_init(struct drm_i915_private *i915) +int intel_pcode_init(struct intel_uncore *uncore) { - int ret = 0; + if (!IS_DGFX(uncore->i915)) + return 0; - if (!IS_DGFX(i915)) - return ret; - - ret = skl_pcode_request(i915, DG1_PCODE_STATUS, - DG1_UNCORE_GET_INIT_STATUS, - DG1_UNCORE_INIT_STATUS_COMPLETE, - DG1_UNCORE_INIT_STATUS_COMPLETE, 180000); - - drm_dbg(&i915->drm, "PCODE init status %d\n", ret); - - if (ret) - drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n"); - - return ret; + return skl_pcode_request(uncore, DG1_PCODE_STATUS, + DG1_UNCORE_GET_INIT_STATUS, + DG1_UNCORE_INIT_STATUS_COMPLETE, + DG1_UNCORE_INIT_STATUS_COMPLETE, 180000); } diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h index 0962a17fac48..8f6241b114a5 100644 --- a/drivers/gpu/drm/i915/intel_pcode.h +++ b/drivers/gpu/drm/i915/intel_pcode.h @@ -8,17 +8,17 @@ #include -struct drm_i915_private; +struct intel_uncore; -int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1); -int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, +int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1); +int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, int fast_timeout_us, int slow_timeout_ms); -#define snb_pcode_write(i915, mbox, val) \ - snb_pcode_write_timeout(i915, mbox, val, 500, 0) +#define snb_pcode_write(uncore, mbox, val) \ + snb_pcode_write_timeout(uncore, mbox, val, 500, 0) -int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, +int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request, u32 reply_mask, u32 reply, int timeout_base_ms); -int intel_pcode_init(struct drm_i915_private *i915); +int intel_pcode_init(struct intel_uncore *uncore); #endif /* _INTEL_PCODE_H */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ee0047fdc95d..aacb21cbc62e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2874,7 +2874,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ - ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, + ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { @@ -2893,7 +2893,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ - ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, + ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { drm_err(&dev_priv->drm, @@ -3679,7 +3679,7 @@ intel_sagv_block_time(struct drm_i915_private *dev_priv) u32 val = 0; int ret; - ret = snb_pcode_read(dev_priv, + ret = snb_pcode_read(&dev_priv->uncore, GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, &val, NULL); if (ret) { @@ -3748,7 +3748,7 @@ static void skl_sagv_enable(struct drm_i915_private *dev_priv) return; 
 
 	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
-	ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+	ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
 			      GEN9_SAGV_ENABLE);
 
 	/* We don't need to wait for SAGV when enabling */
@@ -3781,7 +3781,7 @@ static void skl_sagv_disable(struct drm_i915_private *dev_priv)
 	drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
 
 	/* bspec says to keep retrying for at least 1 ms */
-	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+	ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
 				GEN9_SAGV_DISABLE,
 				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
 				1);

From 5f38c3fb55ce3814b4353320d7a205068a420e48 Mon Sep 17 00:00:00 2001
From: Dale B Stimson
Date: Thu, 19 May 2022 09:57:32 +0100
Subject: [PATCH 219/219] drm/i915/pcode: Add a couple of pcode helpers

Some dGfx pcode commands take additional sub-commands and parameters.
Add a couple of helpers to help format these commands and improve code
readability.

v2: Fixed commit author (Rodrigo)
v3: Function rename and convert to new uncore interface for pcode functions
    Remove unnecessary #define's (Andi)
v4: Another function rename

Cc: Tvrtko Ursulin
Signed-off-by: Dale B Stimson
Signed-off-by: Ashutosh Dixit
Reviewed-by: Rodrigo Vivi
Signed-off-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20220519085732.1276255-3-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/i915_reg.h    |  3 +++
 drivers/gpu/drm/i915/intel_pcode.c | 32 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_pcode.h |  6 ++++++
 3 files changed, 41 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9ad593810537..5d2ab9f66294 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6674,6 +6674,9 @@
 
 #define GEN6_PCODE_MAILBOX			_MMIO(0x138124)
 #define   GEN6_PCODE_READY			(1 << 31)
+#define   GEN6_PCODE_MB_PARAM2			REG_GENMASK(23, 16)
+#define   GEN6_PCODE_MB_PARAM1			REG_GENMASK(15, 8)
+#define   GEN6_PCODE_MB_COMMAND			REG_GENMASK(7, 0)
 #define   GEN6_PCODE_ERROR_MASK			0xFF
 #define     GEN6_PCODE_SUCCESS			0x0
 #define     GEN6_PCODE_ILLEGAL_CMD		0x1
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 2be700932322..a234d9b4ed14 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -214,3 +214,35 @@ int intel_pcode_init(struct intel_uncore *uncore)
 				 DG1_UNCORE_INIT_STATUS_COMPLETE,
 				 DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
 }
+
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
+{
+	intel_wakeref_t wakeref;
+	u32 mbox;
+	int err;
+
+	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
+
+	with_intel_runtime_pm(uncore->rpm, wakeref)
+		err = snb_pcode_read(uncore, mbox, val, NULL);
+
+	return err;
+}
+
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
+{
+	intel_wakeref_t wakeref;
+	u32 mbox;
+	int err;
+
+	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
+
+	with_intel_runtime_pm(uncore->rpm, wakeref)
+		err = snb_pcode_write(uncore, mbox, val);
+
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index 8f6241b114a5..8d2198e29422 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -21,4 +21,10 @@ int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
 
 int intel_pcode_init(struct intel_uncore *uncore);
 
+/*
+ * Helpers for dGfx PCODE mailbox command formatting
+ */
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
+
 #endif /* _INTEL_PCODE_H */
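
For context, a minimal usage sketch of the two helpers added by PATCH 219. The mailbox command (0x7d) and sub-command values (0x4, 0x5) below are placeholders chosen for illustration only, not real PCODE commands from this series; the helpers pack them into GEN6_PCODE_MB_COMMAND/PARAM1/PARAM2 with REG_FIELD_PREP and take a runtime-PM wakeref around the mailbox access, so a caller does not have to.

/* Hypothetical caller; 0x7d, 0x4 and 0x5 are placeholder command values. */
static int example_pcode_roundtrip(struct intel_uncore *uncore)
{
	u32 val;
	int err;

	/* read: placeholder command 0x7d, sub-command 0x4 in PARAM1, 0 in PARAM2 */
	err = snb_pcode_read_p(uncore, 0x7d, 0x4, 0, &val);
	if (err)
		return err;

	/* write the value back under another placeholder sub-command */
	return snb_pcode_write_p(uncore, 0x7d, 0x5, 0, val);
}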