From 07f2fee91937283fddebd4b2f666da024738e84c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 4 Apr 2020 11:40:58 +0200 Subject: [PATCH 001/121] i915/gvt: remove unused xen bits No Xen support anywhere here. Remove a dead declaration and an unused include. Signed-off-by: Christoph Hellwig Acked-by: Zhenyu Wang Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20200404094101.672954-4-hch@lst.de --- drivers/gpu/drm/i915/gvt/gvt.c | 1 - drivers/gpu/drm/i915/gvt/hypercall.h | 2 -- 2 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index a6c4fcefa83b..ed7be1b39a06 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -31,7 +31,6 @@ */ #include -#include #include #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index b17c4a1599cd..b79da5124f83 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -79,6 +79,4 @@ struct intel_gvt_mpt { bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn); }; -extern struct intel_gvt_mpt xengt_mpt; - #endif /* _GVT_HYPERCALL_H_ */ From 6c2f73e26a253ae827d9754572bfee4a912e559c Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Tue, 14 Apr 2020 23:57:28 -0400 Subject: [PATCH 002/121] drm/i915/gvt: access shadow ctx via its virtual address directly as shadow context is pinned in intel_vgpu_setup_submission() and unpinned in intel_vgpu_clean_submission(), its base virtual address of is safely obtained from lrc_reg_state. no need to call kmap()/kunmap() repeatedly. Signed-off-by: Yan Zhao Reviewed-by: Zhenyu Wang Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20200415035728.26424-1-yan.y.zhao@intel.com --- drivers/gpu/drm/i915/gvt/scheduler.c | 35 ++++++++++++++-------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index e1e6345700cc..4639a56f9a3c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -128,16 +128,19 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_gem_object *ctx_obj = - workload->req->context->state->obj; + struct intel_context *ctx = workload->req->context; struct execlist_ring_context *shadow_ring_context; - struct page *page; void *dst; + void *context_base; unsigned long context_gpa, context_page_num; int i; - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - shadow_ring_context = kmap(page); + GEM_BUG_ON(!intel_context_is_pinned(ctx)); + + context_base = (void *) ctx->lrc_reg_state - + (LRC_STATE_PN << I915_GTT_PAGE_SHIFT); + + shadow_ring_context = (void *) ctx->lrc_reg_state; sr_oa_regs(workload, (u32 *)shadow_ring_context, true); #define COPY_REG(name) \ @@ -169,7 +172,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); sr_oa_regs(workload, (u32 *)shadow_ring_context, false); - kunmap(page); if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) return 0; @@ -194,11 +196,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EFAULT; } - page = i915_gem_object_get_page(ctx_obj, i); - dst = kmap(page); + dst = context_base + (i << I915_GTT_PAGE_SHIFT); intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, 
I915_GTT_PAGE_SIZE); - kunmap(page); i++; } return 0; @@ -784,9 +784,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload) { struct i915_request *rq = workload->req; struct intel_vgpu *vgpu = workload->vgpu; - struct drm_i915_gem_object *ctx_obj = rq->context->state->obj; struct execlist_ring_context *shadow_ring_context; - struct page *page; + struct intel_context *ctx = workload->req->context; + void *context_base; void *src; unsigned long context_gpa, context_page_num; int i; @@ -797,6 +797,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload) gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, workload->ctx_desc.lrca); + GEM_BUG_ON(!intel_context_is_pinned(ctx)); + head = workload->rb_head; tail = workload->rb_tail; wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF; @@ -821,6 +823,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload) context_page_num = 19; i = 2; + context_base = (void *) ctx->lrc_reg_state - + (LRC_STATE_PN << I915_GTT_PAGE_SHIFT); while (i < context_page_num) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, @@ -831,19 +835,16 @@ static void update_guest_context(struct intel_vgpu_workload *workload) return; } - page = i915_gem_object_get_page(ctx_obj, i); - src = kmap(page); + src = context_base + (i << I915_GTT_PAGE_SHIFT); intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, I915_GTT_PAGE_SIZE); - kunmap(page); i++; } intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - shadow_ring_context = kmap(page); + shadow_ring_context = (void *) ctx->lrc_reg_state; #define COPY_REG(name) \ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ @@ -860,8 +861,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload) (void *)shadow_ring_context + sizeof(*shadow_ring_context), I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); - - kunmap(page); } void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, From e5e113079efdffb9a39e16a88d109c3d47efdfcc Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Tue, 14 Apr 2020 23:58:27 -0400 Subject: [PATCH 003/121] drm/i915/gvt: combine access to consecutive guest context pages IOVA(GPA)s of context pages are checked and if they are consecutive, read/write them together in one intel_gvt_hypervisor_read_gpa() / intel_gvt_hypervisor_write_gpa(). 
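For reference, the batching idea in this patch reduced to a standalone sketch. read_gpa() and copy_pages() below are made-up stand-ins for intel_gvt_hypervisor_read_gpa() and the page loop in populate_shadow_context(); the real code starts at page 2 of the context and flushes runs via a goto label, while this sketch restructures the same logic as a plain loop:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096u

    /* Stand-in for the hypervisor read: here it only reports each batch. */
    static void read_gpa(uint64_t gpa, void *dst, unsigned long size)
    {
            printf("read %lu bytes at gpa 0x%llx -> %p\n",
                   size, (unsigned long long)gpa, dst);
    }

    /* Copy npages guest pages into base, batching runs of consecutive GPAs. */
    static void copy_pages(const uint64_t *gpas, int npages, void *base)
    {
            uint64_t gpa_base = 0;          /* first GPA of the current run */
            unsigned long gpa_size = 0;     /* bytes accumulated in the run */
            void *dst = base;
            int i;

            for (i = 0; i < npages; i++) {
                    if (gpa_size && gpas[i] == gpa_base + gpa_size) {
                            gpa_size += PAGE_SZ;    /* still consecutive */
                            continue;
                    }
                    if (gpa_size)                   /* flush the previous run */
                            read_gpa(gpa_base, dst, gpa_size);
                    gpa_base = gpas[i];             /* start a new run */
                    gpa_size = PAGE_SZ;
                    dst = (char *)base + (uint64_t)i * PAGE_SZ;
            }
            if (gpa_size)                           /* flush the tail run */
                    read_gpa(gpa_base, dst, gpa_size);
    }

    int main(void)
    {
            static char buf[6 * PAGE_SZ];
            /* pages 0-2 consecutive, page 3 isolated, pages 4-5 consecutive */
            uint64_t gpas[] = { 0x10000, 0x11000, 0x12000,
                                0x40000, 0x80000, 0x81000 };

            copy_pages(gpas, 6, buf);       /* three reads: 3 + 1 + 2 pages */
            return 0;
    }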
Signed-off-by: Yan Zhao Reviewed-by: Zhenyu Wang Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20200415035827.26476-1-yan.y.zhao@intel.com --- drivers/gpu/drm/i915/gvt/scheduler.c | 58 +++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4639a56f9a3c..f650ad3367b6 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -133,6 +133,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) void *dst; void *context_base; unsigned long context_gpa, context_page_num; + unsigned long gpa_base; /* first gpa of consecutive GPAs */ + unsigned long gpa_size; /* size of consecutive GPAs */ int i; GEM_BUG_ON(!intel_context_is_pinned(ctx)); @@ -186,8 +188,11 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0) context_page_num = 19; - i = 2; - while (i < context_page_num) { + /* find consecutive GPAs from gma until the first inconsecutive GPA. + * read from the continuous GPAs into dst virtual address + */ + gpa_size = 0; + for (i = 2; i < context_page_num; i++) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << I915_GTT_PAGE_SHIFT)); @@ -196,10 +201,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EFAULT; } + if (gpa_size == 0) { + gpa_base = context_gpa; + dst = context_base + (i << I915_GTT_PAGE_SHIFT); + } else if (context_gpa != gpa_base + gpa_size) + goto read; + + gpa_size += I915_GTT_PAGE_SIZE; + + if (i == context_page_num - 1) + goto read; + + continue; + +read: + intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size); + gpa_base = context_gpa; + gpa_size = I915_GTT_PAGE_SIZE; dst = context_base + (i << I915_GTT_PAGE_SHIFT); - intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, - I915_GTT_PAGE_SIZE); - i++; } return 0; } @@ -789,6 +808,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload) void *context_base; void *src; unsigned long context_gpa, context_page_num; + unsigned long gpa_base; /* first gpa of consecutive GPAs */ + unsigned long gpa_size; /* size of consecutive GPAs*/ int i; u32 ring_base; u32 head, tail; @@ -822,11 +843,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload) if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0) context_page_num = 19; - i = 2; context_base = (void *) ctx->lrc_reg_state - (LRC_STATE_PN << I915_GTT_PAGE_SHIFT); - while (i < context_page_num) { + /* find consecutive GPAs from gma until the first inconsecutive GPA. 
+ * write to the consecutive GPAs from src virtual address + */ + gpa_size = 0; + for (i = 2; i < context_page_num; i++) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << I915_GTT_PAGE_SHIFT)); @@ -835,10 +859,24 @@ static void update_guest_context(struct intel_vgpu_workload *workload) return; } + if (gpa_size == 0) { + gpa_base = context_gpa; + src = context_base + (i << I915_GTT_PAGE_SHIFT); + } else if (context_gpa != gpa_base + gpa_size) + goto write; + + gpa_size += I915_GTT_PAGE_SIZE; + + if (i == context_page_num - 1) + goto write; + + continue; + +write: + intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size); + gpa_base = context_gpa; + gpa_size = I915_GTT_PAGE_SIZE; src = context_base + (i << I915_GTT_PAGE_SHIFT); - intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, - I915_GTT_PAGE_SIZE); - i++; } intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + From fc4a8c16e34b2b0a8bd100e1b52dfbba5a8dd981 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Wed, 15 Apr 2020 22:35:52 +0530 Subject: [PATCH 004/121] drm/i915: Power well id for ICL PG3 Gen11 onwards PG3 is contains functions for pipe B, external displays, and VGA. It make sense to add a power well id with name ICL_DISP_PW_3 rather then TGL_DISP_PW_3, Also PG3 power well id requires to know if lpsp is enabled. Reviewed-by: Animesh Manna Signed-off-by: Anshuman Gupta Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20200415170555.15531-2-anshuman.gupta@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 6 +++--- drivers/gpu/drm/i915/display/intel_display_power.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 03bdde19c8c9..1d01c79fb9db 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -943,7 +943,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) /* Power wells at this level and above must be disabled for DC5 entry */ if (INTEL_GEN(dev_priv) >= 12) - high_pg = TGL_DISP_PW_3; + high_pg = ICL_DISP_PW_3; else high_pg = SKL_DISP_PW_2; @@ -3571,7 +3571,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "power well 3", .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = ICL_DISP_PW_3, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_3, @@ -3949,7 +3949,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .name = "power well 3", .domains = TGL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = TGL_DISP_PW_3, + .id = ICL_DISP_PW_3, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_3, diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index da64a5edae7a..56cbae6327b7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -100,7 +100,7 @@ enum i915_power_well_id { SKL_DISP_PW_MISC_IO, SKL_DISP_PW_1, SKL_DISP_PW_2, - TGL_DISP_PW_3, + ICL_DISP_PW_3, SKL_DISP_DC_OFF, }; From 8806211fe7b30696c1fcae54b73c94abfdf55893 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Wed, 15 Apr 2020 22:35:53 +0530 Subject: [PATCH 005/121] drm/i915: Add i915_lpsp_capability debugfs New i915_pm_lpsp igt solution approach relies on connector specific debugfs attribute 
i915_lpsp_capability, it exposes whether an output is capable of driving lpsp. v2: - CI fixup. v3: - register i915_lpsp_info only for supported connector. [Jani] - use intel_display_power_well_is_enabled() instead of looking inside power_well count. [Jani] - fixes the lpsp capable conditional logic. [Jani] - combined the lpsp capable and enable info. [Jani] v4: - Separate out connector based debugfs i915_lpsp_capability lpsp enable status would be exposes by different entry. [Animesh] v5: - Add Platform Gen condition to add i915_lpsp_capability and some cosmetic nitpick changes. [Animesh] Reviewed-by: Animesh Manna Signed-off-by: Anshuman Gupta Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20200415170555.15531-3-anshuman.gupta@intel.com --- .../drm/i915/display/intel_display_debugfs.c | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index ab20b7ea26f7..69be0e05e97f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -1997,6 +1997,48 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); +#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \ + seq_puts(m, "LPSP: incapable\n")) + +static int i915_lpsp_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct intel_encoder *encoder = + intel_attached_encoder(to_intel_connector(connector)); + struct drm_i915_private *i915 = to_i915(connector->dev); + + if (connector->status != connector_status_connected) + return -ENODEV; + + switch (INTEL_GEN(i915)) { + case 12: + /* + * Actually TGL can drive LPSP on port till DDI_C + * but there is no physical connected DDI_C on TGL sku's, + * even driver is not initilizing DDI_C port for gen12. 
+ */ + LPSP_CAPABLE(encoder->port <= PORT_B); + break; + case 11: + LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI || + connector->connector_type == DRM_MODE_CONNECTOR_eDP); + break; + case 10: + case 9: + LPSP_CAPABLE(encoder->port == PORT_A && + (connector->connector_type == DRM_MODE_CONNECTOR_DSI || + connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)); + break; + default: + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability); + static int i915_dsc_fec_support_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; @@ -2140,5 +2182,16 @@ int intel_connector_debugfs_add(struct drm_connector *connector) debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root, connector, &i915_dsc_fec_support_fops); + /* Legacy panels doesn't lpsp on any platform */ + if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) || + IS_BROADWELL(dev_priv)) && + (connector->connector_type == DRM_MODE_CONNECTOR_DSI || + connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) + debugfs_create_file("i915_lpsp_capability", 0444, root, + connector, &i915_lpsp_capability_fops); + return 0; } From 76a23f06011dcc19c6769a45f2c788da70a93130 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Wed, 15 Apr 2020 22:35:54 +0530 Subject: [PATCH 006/121] drm/i915: Add connector dbgfs for all connectors Add connector debugfs attributes for each intel connector which is getting register. v2: - adding connector debugfs for each connector in intel_connector_register() to fix CI failure for legacy connectors. 
Reviewed-by: Jani Nikula Signed-off-by: Anshuman Gupta Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20200415170555.15531-4-anshuman.gupta@intel.com --- drivers/gpu/drm/i915/display/intel_connector.c | 3 +++ drivers/gpu/drm/i915/display/intel_dp.c | 3 --- drivers/gpu/drm/i915/display/intel_hdmi.c | 3 --- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 98ec2ea86c7c..406e96785c76 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -33,6 +33,7 @@ #include "i915_drv.h" #include "intel_connector.h" +#include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_hdcp.h" @@ -123,6 +124,8 @@ int intel_connector_register(struct drm_connector *connector) goto err_backlight; } + intel_connector_debugfs_add(connector); + return 0; err_backlight: diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d4fcc9583869..48397b2c08cf 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -48,7 +48,6 @@ #include "intel_audio.h" #include "intel_connector.h" #include "intel_ddi.h" -#include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" @@ -6451,8 +6450,6 @@ intel_dp_connector_register(struct drm_connector *connector) if (ret) return ret; - intel_connector_debugfs_add(connector); - drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", intel_dp->aux.name, connector->kdev->kobj.name); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 6b1bc955124c..9c058f7aa185 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -44,7 +44,6 @@ #include "intel_audio.h" #include "intel_connector.h" #include "intel_ddi.h" -#include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpio_phy.h" @@ -2878,8 +2877,6 @@ intel_hdmi_connector_register(struct drm_connector *connector) if (ret) return ret; - intel_connector_debugfs_add(connector); - intel_hdmi_create_i2c_symlink(connector); return ret; From 9efa0c1a500f97f17e959397f368ca252336fde1 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Wed, 15 Apr 2020 22:35:55 +0530 Subject: [PATCH 007/121] drm/i915: Add i915_lpsp_status debugfs attribute It requires a separate debugfs attribute to expose lpsp status to user space, as there may be display less configuration without any valid connected output, those configuration will not be able to test lpsp status, if lpsp status exposed from a connector based debugfs attribute. 
Reviewed-by: Animesh Manna Signed-off-by: Anshuman Gupta Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20200415170555.15531-5-anshuman.gupta@intel.com --- .../drm/i915/display/intel_display_debugfs.c | 47 +++++++++++++++++++ .../drm/i915/display/intel_display_power.h | 2 + 2 files changed, 49 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 69be0e05e97f..3d9dc27478b3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -9,6 +9,7 @@ #include "i915_debugfs.h" #include "intel_csr.h" #include "intel_display_debugfs.h" +#include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_fbc.h" @@ -1149,6 +1150,51 @@ static int i915_drrs_status(struct seq_file *m, void *unused) return 0; } +#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \ + seq_puts(m, "LPSP: disabled\n")) + +static bool +intel_lpsp_power_well_enabled(struct drm_i915_private *i915, + enum i915_power_well_id power_well_id) +{ + intel_wakeref_t wakeref; + bool is_enabled; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + is_enabled = intel_display_power_well_is_enabled(i915, + power_well_id); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + return is_enabled; +} + +static int i915_lpsp_status(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = node_to_i915(m->private); + + switch (INTEL_GEN(i915)) { + case 12: + case 11: + LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3)); + break; + case 10: + case 9: + LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2)); + break; + default: + /* + * Apart from HASWELL/BROADWELL other legacy platform doesn't + * support lpsp. 
+ */ + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL)); + else + seq_puts(m, "LPSP: not supported\n"); + } + + return 0; +} + static int i915_dp_mst_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1916,6 +1962,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_dp_mst_info", i915_dp_mst_info, 0}, {"i915_ddb_info", i915_ddb_info, 0}, {"i915_drrs_status", i915_drrs_status, 0}, + {"i915_lpsp_status", i915_lpsp_status, 0}, }; static const struct { diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 56cbae6327b7..14c5ad20287f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -266,6 +266,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain); bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); +bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, + enum i915_power_well_id power_well_id); bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, From fb55c735522352704c35d899d0b253453cf0e799 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Fri, 17 Apr 2020 05:13:34 -0400 Subject: [PATCH 008/121] drm/i915/gvt: skip populate shadow context if guest context not changed Software is not expected to populate engine context except when using restore inhibit bit or golden state to initialize it for the first time. Therefore, if a newly submitted guest context is the same as the last shadowed one, no need to populate its engine context from guest again. Currently using lrca + ring_context_gpa to identify whether two guest contexts are the same. The reason of why context id is not included as an identifier is that i915 recently changed the code and context id is only unique for a context when OA is enabled. And when OA is on, context id is generated based on lrca. Therefore, in that case, if two contexts are of the same lrca, they have identical context ids as well. (This patch also works with old guest kernel like 4.20.) for guest context, if its ggtt entry is modified after last context shadowing, it is also deemed as not the same context as last shadowed one. v7: -removed local variable "valid". use the one in s->last_ctx diretly v6: -change type of lrca of last ctx to be u32. as currently it's all protected by vgpu lock (Kevin Tian) -reset valid of last ctx to false once it needs to be repopulated before population completes successfully (Kevin Tian) v5: -merge all 3 patches into one patch (Zhenyu Wang) v4: - split the series into 3 patches. 
- don't turn on optimization until last patch in this series (Kevin Tian) - define lrca to be atomic in this patch rather than update its type in the second patch (Kevin Tian) v3: updated commit message to describe engine context and context id clearly (Kevin Tian) v2: rebased to 5.6.0-rc4+Signed-off-by: Yan Zhao Reviewed-by: Zhenyu Wang Reviewed-by: Kevin Tian Cc: Kevin Tian Suggested-by: Zhenyu Wang Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20200417091334.32628-1-yan.y.zhao@intel.com --- drivers/gpu/drm/i915/gvt/gtt.c | 15 ++++++++++++ drivers/gpu/drm/i915/gvt/gvt.h | 5 ++++ drivers/gpu/drm/i915/gvt/scheduler.c | 34 ++++++++++++++++++++++++---- 3 files changed, 49 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index f6f2ab2683f7..dffd4b79b9a6 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2337,12 +2337,27 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; int ret; + struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_engine_cs *engine; + int i; if (bytes != 4 && bytes != 8) return -EINVAL; off -= info->gtt_start_offset; ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes); + + /* if ggtt of last submitted context is written, + * that context is probably got unpinned. + * Set last shadowed ctx to invalid. + */ + for_each_engine(engine, vgpu->gvt->gt, i) { + if (!s->last_ctx[i].valid) + continue; + + if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift)) + s->last_ctx[i].valid = false; + } return ret; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 58c2c7932e3f..a4a6db6b7f90 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -163,6 +163,11 @@ struct intel_vgpu_submission { const struct intel_vgpu_submission_ops *ops; int virtual_submission_interface; bool active; + struct { + u32 lrca; + bool valid; + u64 ring_context_gpa; + } last_ctx[I915_NUM_ENGINES]; }; struct intel_vgpu { diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index f650ad3367b6..92a055c9d8d8 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -135,7 +135,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) unsigned long context_gpa, context_page_num; unsigned long gpa_base; /* first gpa of consecutive GPAs */ unsigned long gpa_size; /* size of consecutive GPAs */ + struct intel_vgpu_submission *s = &vgpu->submission; int i; + bool skip = false; + int ring_id = workload->engine->id; GEM_BUG_ON(!intel_context_is_pinned(ctx)); @@ -175,13 +178,31 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sr_oa_regs(workload, (u32 *)shadow_ring_context, false); - if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) + gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx", + workload->engine->name, workload->ctx_desc.lrca, + workload->ctx_desc.context_id, + workload->ring_context_gpa); + + /* only need to ensure this context is not pinned/unpinned during the + * period from last submission to this this submission. + * Upon reaching this function, the currently submitted context is not + * supposed to get unpinned. If a misbehaving guest driver ever does + * this, it would corrupt itself. 
+ */ + if (s->last_ctx[ring_id].valid && + (s->last_ctx[ring_id].lrca == + workload->ctx_desc.lrca) && + (s->last_ctx[ring_id].ring_context_gpa == + workload->ring_context_gpa)) + skip = true; + + s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca; + s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa; + + if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip) return 0; - gvt_dbg_sched("ring %s workload lrca %x", - workload->engine->name, - workload->ctx_desc.lrca); - + s->last_ctx[ring_id].valid = false; context_page_num = workload->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; @@ -220,6 +241,7 @@ read: gpa_size = I915_GTT_PAGE_SIZE; dst = context_base + (i << I915_GTT_PAGE_SHIFT); } + s->last_ctx[ring_id].valid = true; return 0; } @@ -1296,6 +1318,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) atomic_set(&s->running_workload_num, 0); bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES); + memset(s->last_ctx, 0, sizeof(s->last_ctx)); + i915_vm_put(&ppgtt->vm); return 0; From 48b8b04c791d1e39883416fd3106cad205007298 Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Thu, 16 Apr 2020 16:24:19 +0530 Subject: [PATCH 009/121] drm/i915/display: Enable DP Display Audio WA For certain DP VDSC bpp settings, hblank asserts before hblank_early, leading to a bad audio state. Driver need to program "hblank early enable" and "samples per line" parameters in AUDIO_CONFIG_BE register. This is Display Audio WA #1406928334 for 4k+VDSC usecase applicable on DP encoders. Implemented the same. v2: Fixed build failures on 32bit machine. v3: Dropped u64, added helpers for sample room calculation, other general comments as per Jani Nikula's feedback. Also fixed connector type check (spotted by Anshuman) v4: Addressed Jani Nikula and Kai's review comments. v5: Addressed Anshuman's review comment and used crtc_* variable to get timings. v6: Dropped a redundant initialization. 
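The helpers added here stay on 32-bit integer math and carry fractional ratios by pre-scaling, which is also why fec is folded in as 972261 against a 1000000 multiplier. A minimal standalone illustration of why the ROUNDING_FACTOR scaling is needed; the clock numbers below are arbitrary example values, not taken from the WA formula or any particular display mode:

    #include <stdio.h>

    #define ROUNDING_FACTOR 10000

    int main(void)
    {
            unsigned int link_clk = 270000;         /* kHz, example value */
            unsigned int pixel_clk = 533250;        /* kHz, example value */
            unsigned int hblank = 160;              /* pixels, example value */

            /* Naive integer math truncates the clock ratio to 0 ... */
            unsigned int bad = hblank * (link_clk / pixel_clk);

            /* ... scaling first keeps the fraction until the final divide. */
            unsigned int good = (hblank *
                                 ((link_clk * ROUNDING_FACTOR) / pixel_clk)) /
                                ROUNDING_FACTOR;

            printf("bad=%u good=%u\n", bad, good);  /* bad=0 good=81 */
            return 0;
    }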
Reviewed-by: Anshuman Gupta Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20200416105419.9664-1-uma.shankar@intel.com --- drivers/gpu/drm/i915/display/intel_audio.c | 146 +++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 16 +++ 2 files changed, 162 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 57b80971ae78..dc311bb227f1 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -514,6 +514,148 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, mutex_unlock(&dev_priv->av_mutex); } +/* Add a factor to take care of rounding and truncations */ +#define ROUNDING_FACTOR 10000 + +static unsigned int get_hblank_early_enable_config(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + unsigned int link_clks_available, link_clks_required; + unsigned int tu_data, tu_line, link_clks_active; + unsigned int hblank_rise, hblank_early_prog; + unsigned int h_active, h_total, hblank_delta, pixel_clk, v_total; + unsigned int fec_coeff, refresh_rate, cdclk, vdsc_bpp; + + h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay; + h_total = crtc_state->hw.adjusted_mode.crtc_htotal; + v_total = crtc_state->hw.adjusted_mode.crtc_vtotal; + pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock; + refresh_rate = crtc_state->hw.adjusted_mode.vrefresh; + vdsc_bpp = crtc_state->dsc.compressed_bpp; + cdclk = i915->cdclk.hw.cdclk; + /* fec= 0.972261, using rounding multiplier of 1000000 */ + fec_coeff = 972261; + + drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :" + "lanes = %u vdsc_bpp = %u cdclk = %u\n", + h_active, crtc_state->port_clock, crtc_state->lane_count, + vdsc_bpp, cdclk); + + link_clks_available = ((((h_total - h_active) * + ((crtc_state->port_clock * ROUNDING_FACTOR) / + pixel_clk)) / ROUNDING_FACTOR) - 28); + + link_clks_required = DIV_ROUND_UP(192000, (refresh_rate * + v_total)) * ((48 / + crtc_state->lane_count) + 2); + + if (link_clks_available > link_clks_required) + hblank_delta = 32; + else + hblank_delta = DIV_ROUND_UP(((((5 * ROUNDING_FACTOR) / + crtc_state->port_clock) + ((5 * + ROUNDING_FACTOR) / + cdclk)) * pixel_clk), + ROUNDING_FACTOR); + + tu_data = (pixel_clk * vdsc_bpp * 8) / ((crtc_state->port_clock * + crtc_state->lane_count * fec_coeff) / 1000000); + tu_line = (((h_active * crtc_state->port_clock * fec_coeff) / + 1000000) / (64 * pixel_clk)); + link_clks_active = (tu_line - 1) * 64 + tu_data; + + hblank_rise = ((link_clks_active + 6 * DIV_ROUND_UP(link_clks_active, + 250) + 4) * ((pixel_clk * ROUNDING_FACTOR) / + crtc_state->port_clock)) / ROUNDING_FACTOR; + + hblank_early_prog = h_active - hblank_rise + hblank_delta; + + return hblank_early_prog; +} + +static unsigned int get_sample_room_req_config(const struct intel_crtc_state *crtc_state) +{ + unsigned int h_active, h_total, pixel_clk; + unsigned int samples_room; + + h_active = crtc_state->hw.adjusted_mode.hdisplay; + h_total = crtc_state->hw.adjusted_mode.htotal; + pixel_clk = crtc_state->hw.adjusted_mode.clock; + + samples_room = ((((h_total - h_active) * ((crtc_state->port_clock * + ROUNDING_FACTOR) / pixel_clk)) / + ROUNDING_FACTOR) - 12) / ((48 / + crtc_state->lane_count) + 2); + + return samples_room; +} + +static void enable_audio_dsc_wa(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = 
to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum pipe pipe = crtc->pipe; + unsigned int hblank_early_prog, samples_room, h_active; + unsigned int val; + + if (INTEL_GEN(i915) < 11) + return; + + h_active = crtc_state->hw.adjusted_mode.hdisplay; + + if (!(h_active && crtc_state->port_clock && crtc_state->lane_count && + crtc_state->dsc.compressed_bpp && i915->cdclk.hw.cdclk)) { + drm_err(&i915->drm, "Null Params rcvd for hblank early enabling\n"); + WARN_ON(1); + return; + } + + val = intel_de_read(i915, AUD_CONFIG_BE); + + if (INTEL_GEN(i915) == 11) + val |= HBLANK_EARLY_ENABLE_ICL(pipe); + else if (INTEL_GEN(i915) >= 12) + val |= HBLANK_EARLY_ENABLE_TGL(pipe); + + if (crtc_state->dsc.compression_enable && + (crtc_state->hw.adjusted_mode.hdisplay >= 3840 && + crtc_state->hw.adjusted_mode.vdisplay >= 2160)) { + /* Get hblank early enable value required */ + hblank_early_prog = get_hblank_early_enable_config(encoder, + crtc_state); + if (hblank_early_prog < 32) { + val &= ~HBLANK_START_COUNT_MASK(pipe); + val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32); + } else if (hblank_early_prog < 64) { + val &= ~HBLANK_START_COUNT_MASK(pipe); + val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64); + } else if (hblank_early_prog < 96) { + val &= ~HBLANK_START_COUNT_MASK(pipe); + val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96); + } else { + val &= ~HBLANK_START_COUNT_MASK(pipe); + val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128); + } + + /* Get samples room value required */ + samples_room = get_sample_room_req_config(crtc_state); + if (samples_room < 3) { + val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe); + val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room); + } else { + /* Program 0 i.e "All Samples available in buffer" */ + val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe); + val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0); + } + } + + intel_de_write(i915, AUD_CONFIG_BE, val); +} + +#undef ROUNDING_FACTOR + static void hsw_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -531,6 +673,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, mutex_lock(&dev_priv->av_mutex); + /* Enable Audio WA for 4k DSC usecases */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) + enable_audio_dsc_wa(encoder, crtc_state); + /* Enable audio presence detect, invalidate ELD */ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD); tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index edda3f29c8aa..a24e23e9b3d0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9396,6 +9396,22 @@ enum { #define AUD_PIN_BUF_CTL _MMIO(0x48414) #define AUD_PIN_BUF_ENABLE REG_BIT(31) +/* Display Audio Config Reg */ +#define AUD_CONFIG_BE _MMIO(0x65ef0) +#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe))) +#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe))) +#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6))) +#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6)) +#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6)) +#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6)) + +#define HBLANK_START_COUNT_8 0 +#define HBLANK_START_COUNT_16 1 +#define HBLANK_START_COUNT_32 2 +#define HBLANK_START_COUNT_64 3 +#define HBLANK_START_COUNT_96 4 +#define HBLANK_START_COUNT_128 5 + /* * HSW - 
ICL power wells * From 23122a4d992b5d5a660155e2c4640e5a93cc957d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 16 Apr 2020 12:41:17 +0100 Subject: [PATCH 010/121] drm/i915/gt: Scrub execlists state on resume Before we resume, we reset the HW so we restart from a known good state. However, as a part of the reset process, we drain our pending CS event queue -- and if we are resuming that does not correspond to internal state. On setup, we are scrubbing the CS pointers, but alas only on setup. Apply the sanitization not just to setup, but to all resumes. Reported-by: Venkata Ramana Nayana Signed-off-by: Chris Wilson Cc: Venkata Ramana Nayana Cc: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Mika Kuoppala Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200416114117.3460-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 + drivers/gpu/drm/i915/gt/intel_gt_pm.c | 4 ++ drivers/gpu/drm/i915/gt/intel_lrc.c | 72 +++++++++++--------- 3 files changed, 43 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 01d4bd781a2f..bf395227c99f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -419,6 +419,7 @@ struct intel_engine_cs { void (*irq_enable)(struct intel_engine_cs *engine); void (*irq_disable)(struct intel_engine_cs *engine); + void (*sanitize)(struct intel_engine_cs *engine); int (*resume)(struct intel_engine_cs *engine); struct { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 3e8a56c7d818..6bdb74892a1e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -147,6 +147,10 @@ static void gt_sanitize(struct intel_gt *gt, bool force) if (intel_gt_is_wedged(gt)) intel_gt_unset_wedged(gt); + for_each_engine(engine, gt, id) + if (engine->sanitize) + engine->sanitize(engine); + intel_uc_sanitize(>->uc); for_each_engine(engine, gt, id) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 6fbad5e2343f..34f67eb9bfa1 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3615,6 +3615,43 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } +static void reset_csb_pointers(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + const unsigned int reset_value = execlists->csb_size - 1; + + ring_set_paused(engine, 0); + + /* + * After a reset, the HW starts writing into CSB entry [0]. We + * therefore have to set our HEAD pointer back one entry so that + * the *first* entry we check is entry 0. To complicate this further, + * as we don't wait for the first interrupt after reset, we have to + * fake the HW write to point back to the last entry so that our + * inline comparison of our cached head position against the last HW + * write works even before the first interrupt. + */ + execlists->csb_head = reset_value; + WRITE_ONCE(*execlists->csb_write, reset_value); + wmb(); /* Make sure this is visible to HW (paranoia?) */ + + /* + * Sometimes Icelake forgets to reset its pointers on a GPU reset. + * Bludgeon them with a mmio update to be sure. 
+ */ + ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, + reset_value << 8 | reset_value); + ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); + + invalidate_csb_entries(&execlists->csb_status[0], + &execlists->csb_status[reset_value]); +} + +static void execlists_sanitize(struct intel_engine_cs *engine) +{ + reset_csb_pointers(engine); +} + static void enable_error_interrupt(struct intel_engine_cs *engine) { u32 status; @@ -3754,38 +3791,6 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) intel_engine_stop_cs(engine); } -static void reset_csb_pointers(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - const unsigned int reset_value = execlists->csb_size - 1; - - ring_set_paused(engine, 0); - - /* - * After a reset, the HW starts writing into CSB entry [0]. We - * therefore have to set our HEAD pointer back one entry so that - * the *first* entry we check is entry 0. To complicate this further, - * as we don't wait for the first interrupt after reset, we have to - * fake the HW write to point back to the last entry so that our - * inline comparison of our cached head position against the last HW - * write works even before the first interrupt. - */ - execlists->csb_head = reset_value; - WRITE_ONCE(*execlists->csb_write, reset_value); - wmb(); /* Make sure this is visible to HW (paranoia?) */ - - /* - * Sometimes Icelake forgets to reset its pointers on a GPU reset. - * Bludgeon them with a mmio update to be sure. - */ - ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, - reset_value << 8 | reset_value); - ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); - - invalidate_csb_entries(&execlists->csb_status[0], - &execlists->csb_status[reset_value]); -} - static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine) { int x; @@ -4545,6 +4550,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ + engine->sanitize = execlists_sanitize; engine->resume = execlists_resume; engine->cops = &execlists_context_ops; @@ -4659,8 +4665,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) else execlists->csb_size = GEN11_CSB_ENTRIES; - reset_csb_pointers(engine); - /* Finally, take ownership and responsibility for cleanup! */ engine->release = execlists_release; From 9d7e560f432766e5894bfbac41d6faa5f3daf1b7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Apr 2020 10:39:27 +0100 Subject: [PATCH 011/121] drm/i915/selftests: Delay spinner before waiting for an interrupt It seems that although (perhaps because of the memory stall?) the spinner has signaled that it has started, it still takes some time to spin up to 100% utilisation of the HW. Since the test depends on the full utilisation of the HW to trigger the RPS interrupt, wait a little bit and flush the interrupt status to be sure that the event we see if from the spinner. 
Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200417093928.17822-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 28 +++++++++++++++----------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 26aadc2ae3be..199d608aa763 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -14,6 +14,20 @@ static void dummy_rps_work(struct work_struct *wrk) { } +static void sleep_for_ei(struct intel_rps *rps, int timeout_us) +{ + /* Flush any previous EI */ + usleep_range(timeout_us, 2 * timeout_us); + + /* Reset the interrupt status */ + rps_disable_interrupts(rps); + GEM_BUG_ON(rps->pm_iir); + rps_enable_interrupts(rps); + + /* And then wait for the timeout, for real this time */ + usleep_range(2 * timeout_us, 3 * timeout_us); +} + static int __rps_up_interrupt(struct intel_rps *rps, struct intel_engine_cs *engine, struct igt_spinner *spin) @@ -28,7 +42,6 @@ static int __rps_up_interrupt(struct intel_rps *rps, intel_gt_pm_wait_for_idle(engine->gt); GEM_BUG_ON(rps->active); - rps->pm_iir = 0; rps->cur_freq = rps->min_freq; rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP); @@ -71,7 +84,7 @@ static int __rps_up_interrupt(struct intel_rps *rps, timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI); timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout); - usleep_range(2 * timeout, 3 * timeout); + sleep_for_ei(rps, timeout); GEM_BUG_ON(i915_request_completed(rq)); igt_spinner_end(spin); @@ -122,16 +135,7 @@ static int __rps_down_interrupt(struct intel_rps *rps, timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI); timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout); - /* Flush any previous EI */ - usleep_range(timeout, 2 * timeout); - - /* Reset the interrupt status */ - rps_disable_interrupts(rps); - GEM_BUG_ON(rps->pm_iir); - rps_enable_interrupts(rps); - - /* And then wait for the timeout, for real this time */ - usleep_range(2 * timeout, 3 * timeout); + sleep_for_ei(rps, timeout); if (rps->cur_freq != rps->max_freq) { pr_err("%s: Frequency unexpectedly changed [down], now %d!\n", From a50717dbf41715683057562d106060bb4e405cce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Apr 2020 10:39:28 +0100 Subject: [PATCH 012/121] drm/i915/selftests: Take the engine wakeref around __rps_up_interrupt Since we are touching the device to read the registers, we are required to ensure the device is awake at the time. Currently, we believe ourselves to be inside the active request [thus an active engine wakeref], but since that may be retired in the background, we can spontaneously lose the wakeref and the ability to probe the HW. 
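The fix, in short, is to hold an explicit engine wakeref for the whole measurement rather than leaning on the spinner request. A simplified view of the hunk further below, using only the calls the patch itself adds:

    /* engine wakeref keeps the device awake while we poke the RP registers */
    intel_engine_pm_get(engine);
    err = __rps_up_interrupt(rps, engine, &spin);
    intel_engine_pm_put(engine);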
<4> [379.686703] RPM wakelock ref not held during HW access <4> [379.686805] WARNING: CPU: 7 PID: 4869 at ./drivers/gpu/drm/i915/intel_runtime_pm.h:115 gen12_fwtable_read32+0x233/0x300 [i915] <4> [379.686808] Modules linked in: i915(+) vgem snd_hda_codec_hdmi mei_hdcp x86_pkg_temp_thermal coretemp crct10dif_pclmul crc32_pclmul ax88179_178a usbnet mii ghash_clmulni_intel snd_intel_dspcfg snd_hda_codec snd_hwdep snd_hda_core snd_pcm e1000e mei_me ptp mei pps_core intel_lpss_pci prime_numbers [last unloaded: i915] <4> [379.686827] CPU: 7 PID: 4869 Comm: i915_selftest Tainted: G U 5.7.0-rc1-CI-CI_DRM_8313+ #1 <4> [379.686830] Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLSFWI1.R00.2457.A13.1912190237 12/19/2019 <4> [379.686883] RIP: 0010:gen12_fwtable_read32+0x233/0x300 [i915] <4> [379.686887] Code: d8 ea e0 0f 0b e9 19 fe ff ff 80 3d ad 12 2d 00 00 0f 85 17 fe ff ff 48 c7 c7 b0 32 3e a0 c6 05 99 12 2d 00 01 e8 2d d8 ea e0 <0f> 0b e9 fd fd ff ff 8b 05 c4 75 56 e2 85 c0 0f 85 84 00 00 00 48 <4> [379.686889] RSP: 0018:ffffc90000727970 EFLAGS: 00010286 <4> [379.686892] RAX: 0000000000000000 RBX: ffff88848cc20ee8 RCX: 0000000000000001 <4> [379.686894] RDX: 0000000080000001 RSI: ffff88843b1f0900 RDI: 00000000ffffffff <4> [379.686896] RBP: 0000000000000000 R08: ffff88843b1f0900 R09: 0000000000000000 <4> [379.686898] R10: 0000000000000000 R11: 0000000000000000 R12: 000000000000a058 <4> [379.686900] R13: 0000000000000001 R14: ffff88848cc2bf30 R15: 00000000ffffffea <4> [379.686902] FS: 00007f7d63f5e300(0000) GS:ffff8884a0180000(0000) knlGS:0000000000000000 <4> [379.686904] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [379.686907] CR2: 000055e5c30f4988 CR3: 000000042e190002 CR4: 0000000000760ee0 <4> [379.686910] PKRU: 55555554 <4> [379.686911] Call Trace: <4> [379.686986] live_rps_interrupt+0xb14/0xc10 [i915] <4> [379.687051] ? intel_rps_unpark+0xb0/0xb0 [i915] <4> [379.687057] ? __trace_bprintk+0x57/0x80 <4> [379.687143] __i915_subtests+0xb8/0x210 [i915] <4> [379.687222] ? __i915_live_teardown+0x50/0x50 [i915] <4> [379.687291] ? __intel_gt_live_setup+0x30/0x30 [i915] <4> [379.687361] __run_selftests+0x112/0x170 [i915] <4> [379.687431] i915_live_selftests+0x2c/0x60 [i915] <4> [379.687491] i915_pci_probe+0x93/0x1b0 [i915] Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200417093928.17822-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 199d608aa763..e60600d14937 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -39,10 +39,10 @@ static int __rps_up_interrupt(struct intel_rps *rps, if (!intel_engine_can_store_dword(engine)) return 0; - intel_gt_pm_wait_for_idle(engine->gt); - GEM_BUG_ON(rps->active); - - rps->cur_freq = rps->min_freq; + mutex_lock(&rps->lock); + GEM_BUG_ON(!rps->active); + intel_rps_set(rps, rps->min_freq); + mutex_unlock(&rps->lock); rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP); if (IS_ERR(rq)) @@ -105,7 +105,6 @@ static int __rps_up_interrupt(struct intel_rps *rps, return -EINVAL; } - intel_gt_pm_wait_for_idle(engine->gt); return 0; } @@ -195,9 +194,16 @@ int live_rps_interrupt(void *arg) for_each_engine(engine, gt, id) { /* Keep the engine busy with a spinner; expect an UP! 
*/ if (pm_events & GEN6_PM_RP_UP_THRESHOLD) { + intel_gt_pm_wait_for_idle(engine->gt); + GEM_BUG_ON(rps->active); + + intel_engine_pm_get(engine); err = __rps_up_interrupt(rps, engine, &spin); + intel_engine_pm_put(engine); if (err) goto out; + + intel_gt_pm_wait_for_idle(engine->gt); } /* Keep the engine awake but idle and check for DOWN */ From 442e7ee834e8528ec85ea0df3c76bfe6c6742f5a Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Wed, 15 Apr 2020 17:39:02 +0300 Subject: [PATCH 013/121] drm/i915: Add intel_atomic_get_bw_*_state helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add correspondent helpers to be able to get old/new bandwidth global state object. v2: - Fixed typo in function call v3: - Changed new functions naming to use convention proposed by Jani Nikula, i.e intel_bw_* in intel_bw.c file. v4: - Change function naming back to intel_atomic* pattern, was decided to rename in a separate patch series. v5: - Fix function naming to match existing practices(Ville) v6: - Removed spurious whitespace v7: - Removed bw_state NULL checks(Ville) Reviewed-by: Ville Syrjälä Signed-off-by: Stanislav Lisovskiy Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200415143911.10244-3-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/intel_bw.c | 24 +++++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_bw.h | 9 +++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 88f367eb28ea..4aa54fcb0629 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -375,7 +375,29 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, return data_rate; } -static struct intel_bw_state * +struct intel_bw_state * +intel_atomic_get_old_bw_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_global_state *bw_state; + + bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj); + + return to_intel_bw_state(bw_state); +} + +struct intel_bw_state * +intel_atomic_get_new_bw_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_global_state *bw_state; + + bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj); + + return to_intel_bw_state(bw_state); +} + +struct intel_bw_state * intel_atomic_get_bw_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index a8aa7624c5aa..ac004d6f4276 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -24,6 +24,15 @@ struct intel_bw_state { #define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base) +struct intel_bw_state * +intel_atomic_get_old_bw_state(struct intel_atomic_state *state); + +struct intel_bw_state * +intel_atomic_get_new_bw_state(struct intel_atomic_state *state); + +struct intel_bw_state * +intel_atomic_get_bw_state(struct intel_atomic_state *state); + void intel_bw_init_hw(struct drm_i915_private *dev_priv); int intel_bw_init(struct drm_i915_private *dev_priv); int intel_bw_atomic_check(struct intel_atomic_state *state); From a389c49fac556cba82edee7a5724269ec2d28981 Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy 
Date: Wed, 15 Apr 2020 17:57:40 +0300 Subject: [PATCH 014/121] drm/i915: Prepare to extract gen specific functions from intel_can_enable_sagv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addressing one of the comments, recommending to extract platform specific code from intel_can_enable_sagv as a preparation, before we are going to add support for tgl+. v2: - Removed whitespace v3: - Removed premature debug and new cycle introduction(Ville) - Added missing no active pipes check(Ville) v4: - Fixed stupid mistake with plane_state caused by stupid macro change Signed-off-by: Stanislav Lisovskiy Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200415145740.28241-1-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 61 +++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b632b6bb9c3e..1e7f9be3cb71 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3757,42 +3757,22 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) return 0; } -bool intel_can_enable_sagv(struct intel_atomic_state *state) +static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = state->base.dev; + struct drm_device *dev = crtc_state->uapi.crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane; - struct intel_crtc_state *crtc_state; - enum pipe pipe; int level, latency; - if (!intel_has_sagv(dev_priv)) - return false; - - /* - * If there are no active CRTCs, no additional checks need be performed - */ - if (hweight8(state->active_pipes) == 0) + if (!crtc_state->hw.active) return true; - /* - * SKL+ workaround: bspec recommends we disable SAGV when we have - * more then one pipe enabled - */ - if (hweight8(state->active_pipes) > 1) - return false; - - /* Since we're now guaranteed to only have one active CRTC... */ - pipe = ffs(state->active_pipes) - 1; - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - crtc_state = to_intel_crtc_state(crtc->base.state); - if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) return false; for_each_intel_plane_on_crtc(dev, crtc, plane) { - struct skl_plane_wm *wm = + const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane->id]; /* Skip this plane if it's not enabled */ @@ -3823,6 +3803,37 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) return true; } +bool intel_can_enable_sagv(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + const struct intel_crtc_state *crtc_state; + enum pipe pipe; + + if (!intel_has_sagv(dev_priv)) + return false; + + /* + * If there are no active CRTCs, no additional checks need be performed + */ + if (hweight8(state->active_pipes) == 0) + return true; + + /* + * SKL+ workaround: bspec recommends we disable SAGV when we have + * more then one pipe enabled + */ + if (hweight8(state->active_pipes) > 1) + return false; + + /* Since we're now guaranteed to only have one active CRTC... 
*/ + pipe = ffs(state->active_pipes) - 1; + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = to_intel_crtc_state(crtc->base.state); + + return intel_crtc_can_enable_sagv(crtc_state); +} + /* * Calculate initial DBuf slice offset, based on slice size * and mask(i.e if slice size is 1024 and second slice is enabled From 680e1af713d92940e39a313a8592b13a6885a14c Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Wed, 15 Apr 2020 17:39:04 +0300 Subject: [PATCH 015/121] drm/i915: Add pre/post plane updates for SAGV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lets have a unified way to handle SAGV changes, espoecially considering the upcoming Gen12 changes. Current "standard" way of doing this in commit_tail is pre/post plane updates, when everything which has to be forbidden and not supported in new config has to be restricted before update and relaxed after plane update. v2: - Removed unneeded returns(Ville) Reviewed-by: Ville Syrjälä Signed-off-by: Stanislav Lisovskiy Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200415143911.10244-5-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 13 ++++--------- drivers/gpu/drm/i915/intel_pm.c | 16 ++++++++++++++++ drivers/gpu/drm/i915/intel_pm.h | 2 ++ 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index af5b4055b38a..5d44f828b150 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15354,12 +15354,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_set_cdclk_pre_plane_update(state); - /* - * SKL workaround: bspec recommends we disable the SAGV when we - * have more then one pipe enabled - */ - if (!intel_can_enable_sagv(state)) - intel_disable_sagv(dev_priv); + intel_sagv_pre_plane_update(state); intel_modeset_verify_disabled(dev_priv, state); } @@ -15456,11 +15451,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (state->modeset) + if (state->modeset) { intel_verify_planes(state); - if (state->modeset && intel_can_enable_sagv(state)) - intel_enable_sagv(dev_priv); + intel_sagv_post_plane_update(state); + } drm_atomic_helper_commit_hw_done(&state->base); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1e7f9be3cb71..d54833a1578f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3757,6 +3757,22 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) return 0; } +void intel_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + + if (!intel_can_enable_sagv(state)) + intel_disable_sagv(dev_priv); +} + +void intel_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + + if (intel_can_enable_sagv(state)) + intel_enable_sagv(dev_priv); +} + static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc_state->uapi.crtc->dev; diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index d60a85421c5a..9a6036ab0f90 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -44,6 +44,8 @@ void 
vlv_wm_sanitize(struct drm_i915_private *dev_priv); bool intel_can_enable_sagv(struct intel_atomic_state *state); int intel_enable_sagv(struct drm_i915_private *dev_priv); int intel_disable_sagv(struct drm_i915_private *dev_priv); +void intel_sagv_pre_plane_update(struct intel_atomic_state *state); +void intel_sagv_post_plane_update(struct intel_atomic_state *state); bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2); bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, From d4e3d455a12e9d7d151f35f86b688bdff5dcd076 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Apr 2020 16:20:17 +0100 Subject: [PATCH 016/121] drm/i915/selftests: Move gpu energy measurement into its own little lib Move the handy utility to measure the GPU energy consumption using RAPL msr into a common lib so that it can be reused easily. Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200417152018.13079-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 3 ++- drivers/gpu/drm/i915/gt/selftest_rc6.c | 25 +++++------------------- drivers/gpu/drm/i915/selftests/librapl.c | 24 +++++++++++++++++++++++ drivers/gpu/drm/i915/selftests/librapl.h | 13 ++++++++++++ 4 files changed, 44 insertions(+), 21 deletions(-) create mode 100644 drivers/gpu/drm/i915/selftests/librapl.c create mode 100644 drivers/gpu/drm/i915/selftests/librapl.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 44c506b7e117..6f112d8f80ca 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -257,7 +257,8 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ selftests/igt_live_test.o \ selftests/igt_mmap.o \ selftests/igt_reset.o \ - selftests/igt_spinner.o + selftests/igt_spinner.o \ + selftests/librapl.o # virtual gpu code i915-y += i915_vgpu.o diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index 08c3dbd41b12..2dc460624bbc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -11,22 +11,7 @@ #include "selftest_rc6.h" #include "selftests/i915_random.h" - -static u64 energy_uJ(struct intel_rc6 *rc6) -{ - unsigned long long power; - u32 units; - - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) - return 0; - - units = (power & 0x1f00) >> 8; - - if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power)) - return 0; - - return (1000000 * power) >> units; /* convert to uJ */ -} +#include "selftests/librapl.h" static u64 rc6_residency(struct intel_rc6 *rc6) { @@ -74,9 +59,9 @@ int live_rc6_manual(void *arg) res[0] = rc6_residency(rc6); dt = ktime_get(); - rc0_power = energy_uJ(rc6); + rc0_power = librapl_energy_uJ(); msleep(250); - rc0_power = energy_uJ(rc6) - rc0_power; + rc0_power = librapl_energy_uJ() - rc0_power; dt = ktime_sub(ktime_get(), dt); res[1] = rc6_residency(rc6); if ((res[1] - res[0]) >> 10) { @@ -99,9 +84,9 @@ int live_rc6_manual(void *arg) res[0] = rc6_residency(rc6); intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL); dt = ktime_get(); - rc6_power = energy_uJ(rc6); + rc6_power = librapl_energy_uJ(); msleep(100); - rc6_power = energy_uJ(rc6) - rc6_power; + rc6_power = librapl_energy_uJ() - rc6_power; dt = ktime_sub(ktime_get(), dt); res[1] = rc6_residency(rc6); if (res[1] == res[0]) { diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c new file mode 100644 index 000000000000..58710ac3f979 --- /dev/null +++ 
b/drivers/gpu/drm/i915/selftests/librapl.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include + +#include "librapl.h" + +u64 librapl_energy_uJ(void) +{ + unsigned long long power; + u32 units; + + if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) + return 0; + + units = (power & 0x1f00) >> 8; + + if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power)) + return 0; + + return (1000000 * power) >> units; /* convert to uJ */ +} diff --git a/drivers/gpu/drm/i915/selftests/librapl.h b/drivers/gpu/drm/i915/selftests/librapl.h new file mode 100644 index 000000000000..887f3e91dd05 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/librapl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef SELFTEST_LIBRAPL_H +#define SELFTEST_LIBRAPL_H + +#include + +u64 librapl_energy_uJ(void); + +#endif /* SELFTEST_LIBRAPL_H */ From c43dd6b4144aa8f27ff9a1c4872f6ab49a4ba3ec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Apr 2020 16:20:18 +0100 Subject: [PATCH 017/121] drm/i915/selftests: Check power consumption at min/max frequencies A basic premise of RPS is that at lower frequencies, not only do we run slower, but we save power compared to higher frequencies. For example, when idle, we set the minimum frequency just in case there is some residual current. Since the power curve should be a physical relationship, if we find no power saving it's likely that we've broken our frequency handling, so test! Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Andi Shyti Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200417152018.13079-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 1 + drivers/gpu/drm/i915/gt/selftest_rps.c | 138 +++++++++++++++++++++++ drivers/gpu/drm/i915/gt/selftest_rps.h | 1 + 3 files changed, 140 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index c50bb502fe03..0141c334f2ac 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -54,6 +54,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) static const struct i915_subtest tests[] = { SUBTEST(live_rc6_manual), SUBTEST(live_rps_interrupt), + SUBTEST(live_rps_power), SUBTEST(live_gt_resume), }; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index e60600d14937..360f56aa4b82 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -3,12 +3,15 @@ * Copyright © 2020 Intel Corporation */ +#include + #include "intel_engine_pm.h" #include "intel_gt_pm.h" #include "intel_rc6.h" #include "selftest_rps.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_spinner.h" +#include "selftests/librapl.h" static void dummy_rps_work(struct work_struct *wrk) { @@ -231,3 +234,138 @@ out: return err; } + +static u64 __measure_power(int duration_ms) +{ + u64 dE, dt; + + dt = ktime_get(); + dE = librapl_energy_uJ(); + usleep_range(1000 * duration_ms, 2000 * duration_ms); + dE = librapl_energy_uJ() - dE; + dt = ktime_get() - dt; + + return div64_u64(1000 * 1000 * dE, dt); +} + +static int cmp_u64(const void *A, const void *B) +{ + const u64 *a = A, *b = B; + + if (a < b) + return -1; + else if (a > b) + return 1; + else + return 0; +} + +static u64 measure_power_at(struct intel_rps *rps, int freq) +{ + u64 x[5]; + int i; + + mutex_lock(&rps->lock); + GEM_BUG_ON(!rps->active); + 
intel_rps_set(rps, freq); + mutex_unlock(&rps->lock); + + msleep(20); /* more than enough time to stabilise! */ + + i = read_cagf(rps); + if (i != freq) + pr_notice("Running at %x [%uMHz], not target %x [%uMHz]\n", + i, intel_gpu_freq(rps, i), + freq, intel_gpu_freq(rps, freq)); + + for (i = 0; i < 5; i++) + x[i] = __measure_power(5); + + /* A simple triangle filter for better result stability */ + sort(x, 5, sizeof(*x), cmp_u64, NULL); + return div_u64(x[1] + 2 * x[2] + x[3], 4); +} + +int live_rps_power(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_rps *rps = >->rps; + void (*saved_work)(struct work_struct *wrk); + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + /* + * Our fundamental assumption is that running at lower frequency + * actually saves power. Let's see if our RAPL measurement support + * that theory. + */ + + if (!rps->enabled || rps->max_freq <= rps->min_freq) + return 0; + + if (!librapl_energy_uJ()) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + intel_gt_pm_wait_for_idle(gt); + saved_work = rps->work.func; + rps->work.func = dummy_rps_work; + + for_each_engine(engine, gt, id) { + struct i915_request *rq; + u64 min, max; + + if (!intel_engine_can_store_dword(engine)) + continue; + + rq = igt_spinner_create_request(&spin, + engine->kernel_context, + MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + pr_err("%s: RPS spinner did not start\n", + engine->name); + intel_gt_set_wedged(engine->gt); + err = -EIO; + break; + } + + max = measure_power_at(rps, rps->max_freq); + min = measure_power_at(rps, rps->min_freq); + + igt_spinner_end(&spin); + + pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n", + engine->name, + min, intel_gpu_freq(rps, rps->min_freq), + max, intel_gpu_freq(rps, rps->max_freq)); + if (11 * min > 10 * max) { + pr_err("%s: did not conserve power when setting lower frequency!\n", + engine->name); + err = -EINVAL; + break; + } + + if (igt_flush_test(gt->i915)) { + err = -EIO; + break; + } + } + + igt_spinner_fini(&spin); + + intel_gt_pm_wait_for_idle(gt); + rps->work.func = saved_work; + + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h index abba66420996..cad515a7f0e5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.h +++ b/drivers/gpu/drm/i915/gt/selftest_rps.h @@ -7,5 +7,6 @@ #define SELFTEST_RPS_H int live_rps_interrupt(void *arg); +int live_rps_power(void *arg); #endif /* SELFTEST_RPS_H */ From c4310defd88e1e32859d4371d07fa3e1d98c1436 Mon Sep 17 00:00:00 2001 From: Radhakrishna Sripada Date: Thu, 16 Apr 2020 09:46:10 -0700 Subject: [PATCH 018/121] drm/i915/icl: Update forcewake firmware ranges Some workarounds are not sticking across suspend resume cycles. The forcewake ranges table has been updated and would reflect the hardware appropriately. 
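For illustration, a minimal sketch of why the table must stay sorted by offset (not part of the patch; the helper name is invented, and the start/end/domains fields of struct intel_forcewake_range are assumed from the GEN_FW_RANGE() entries): a sorted, non-overlapping table lets the uncore code resolve a register offset to its forcewake domains with a plain binary search.

    static enum forcewake_domains
    lookup_fw_domains(const struct intel_forcewake_range *ranges,
                      unsigned int count, u32 offset)
    {
            unsigned int lo = 0, hi = count;

            /* Correct only if the ranges are sorted and non-overlapping. */
            while (lo < hi) {
                    unsigned int mid = lo + (hi - lo) / 2;

                    if (offset < ranges[mid].start)
                            hi = mid;
                    else if (offset > ranges[mid].end)
                            lo = mid + 1;
                    else
                            return ranges[mid].domains;
            }

            return 0; /* offset needs no forcewake */
    }

Under that scheme an entry such as GEN_FW_RANGE(0x8800, 0x8bff, 0) simply resolves to "no domain" instead of falling through to a neighbouring range.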
Closes: https://gitlab.freedesktop.org/drm/intel/issues/1222 v2: Simplify the table and use 0 for some unused ranges(Matt) Cc: Matt Roper Signed-off-by: Radhakrishna Sripada Reviewed-by: Matt Atwood Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200416164610.15422-1-radhakrishna.sripada@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 31 ++++++++++++++++------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index fa86b7ab2d99..078f5b2eb8a4 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1092,8 +1092,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = { /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ static const struct intel_forcewake_range __gen11_fw_ranges[] = { - GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ + GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), @@ -1103,27 +1102,31 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = { GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8800, 0x8bff, 0), GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), - GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x9560, 0x95ff, 0), + GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER), GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24000, 0x2407f, 0), + GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x40000, 0x1bffff, 0), GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), - GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), - GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), - GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0), + GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0), GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), - GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), - GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) + GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0) }; /* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
*/ From c0ff9e5e69f33ce5051f33cc4cb35afa754de01c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Thu, 16 Apr 2020 11:58:41 -0700 Subject: [PATCH 019/121] drm/i915: Add missing deinitialization cases of load failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The intel_display_power_put_async() used in TC cold sequences made easy to hit the missing deinitialization of driver in case of load failure as seen in the stack trace bellow. intel_modeset_driver_remove_noirq() had to be removed from i915_driver_modeset_remove_noirq() as those are different initialialition steps with IRQ and GEM initialization in between then. [drm:__intel_engine_init_ctx_wa [i915]] Initialized 3 context workarounds on rcs'0 [drm:__i915_inject_probe_error [i915]] Injecting failure -19 at checkpoint 36 [__uc_init:294] [drm:i915_hdcp_component_unbind [i915]] I915 HDCP comp unbind [drm:edp_panel_vdd_off_sync [i915]] Turning [ENCODER:275:DDI A] VDD off [drm:edp_panel_vdd_off_sync [i915]] PP_STATUS: 0x00000000 PP_CONTROL: 0x00000060 [drm:intel_power_well_disable [i915]] disabling AUX A general protection fault, probably for non-canonical address 0x6b6b6b6b6b6b6b6b: 0000 [#1] PREEMPT SMP NOPTI CPU: 3 PID: 1142 Comm: kworker/u16:20 Tainted: G U 5.6.0-CI-Patchwork_17226+ #1 Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLSFWI1.R00.2457.A16.1912270059 12/27/2019 Workqueue: events_unbound intel_display_power_put_async_work [i915] RIP: 0010:__intel_display_power_put_domain+0xa5/0x180 [i915] Code: 48 85 c0 78 54 44 89 e1 41 bd 01 00 00 00 49 c7 c4 80 44 41 a0 49 d3 e5 eb 0d 48 83 eb 10 48 3b 9d 08 ad 00 00 78 32 48 8b 03 <4c> 85 68 10 74 ea 8b 53 08 85 d2 74 2d 83 ea 01 85 d2 89 53 08 75 RSP: 0018:ffffc9000061fdb0 EFLAGS: 00010206 RAX: 6b6b6b6b6b6b6b6b RBX: ffff8884948f5df0 RCX: 000000000000003d RDX: 0000000080000001 RSI: 0000000000000000 RDI: 0000000000000000 RBP: ffff888479be0000 R08: ffff88849a180920 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffa0414480 R13: 2000000000000000 R14: ffff888479beb320 R15: 2000000000000000 FS: 0000000000000000(0000) GS:ffff88849ff80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00005634fa8ed670 CR3: 0000000005610004 CR4: 0000000000760ee0 PKRU: 55555554 Call Trace: release_async_put_domains+0x9b/0x110 [i915] intel_display_power_put_async_work+0x91/0xf0 [i915] process_one_work+0x260/0x600 ? worker_thread+0xc9/0x380 worker_thread+0x37/0x380 ? process_one_work+0x600/0x600 kthread+0x119/0x130 ? kthread_park+0x80/0x80 ret_from_fork+0x24/0x50 Modules linked in: i915(+) vgem snd_hda_codec_hdmi mei_hdcp x86_pkg_temp_thermal coretemp crct10dif_pclmul crc32_pclmul cdc_ether usbnet mii snd_intel_dspcfg ghash_clmulni_intel snd_hda_codec snd_hwdep snd_hda_core e1000e ptp mei_me snd_pcm pps_core mei intel_lpss_pci prime_numbers [last unloaded: i915] ---[ end trace b402d1b4060f8b97 ]--- BUG: sleeping function called from invalid context at kernel/sched/completion.c:99 in_atomic(): 0, irqs_disabled(): 0, non_block: 0, pid: 1142, name: kworker/u16:20 INFO: lockdep is turned off. 
Preemption disabled at: [<0000000000000000>] 0x0 CPU: 3 PID: 1142 Comm: kworker/u16:20 Tainted: G UD 5.6.0-CI-Patchwork_17226+ #1 Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLSFWI1.R00.2457.A16.1912270059 12/27/2019 Workqueue: events_unbound intel_display_power_put_async_work [i915] Call Trace: dump_stack+0x71/0x9b ___might_sleep+0x178/0x260 wait_for_completion+0x37/0x1a0 virt_efi_query_variable_info+0x161/0x1b0 efi_query_variable_store+0xb3/0x1a0 ? efivar_entry_set_safe+0x19c/0x220 efivar_entry_set_safe+0x19c/0x220 ? efi_pstore_write+0x10b/0x150 ? efi_pstore_write+0xa0/0x150 efi_pstore_write+0x10b/0x150 pstore_dump+0x123/0x340 kmsg_dump+0x87/0x1b0 oops_end+0x3e/0x90 do_general_protection+0x1c3/0x2f0 general_protection+0x2d/0x40 RIP: 0010:__intel_display_power_put_domain+0xa5/0x180 [i915] Code: 48 85 c0 78 54 44 89 e1 41 bd 01 00 00 00 49 c7 c4 80 44 41 a0 49 d3 e5 eb 0d 48 83 eb 10 48 3b 9d 08 ad 00 00 78 32 48 8b 03 <4c> 85 68 10 74 ea 8b 53 08 85 d2 74 2d 83 ea 01 85 d2 89 53 08 75 RSP: 0018:ffffc9000061fdb0 EFLAGS: 00010206 RAX: 6b6b6b6b6b6b6b6b RBX: ffff8884948f5df0 RCX: 000000000000003d RDX: 0000000080000001 RSI: 0000000000000000 RDI: 0000000000000000 RBP: ffff888479be0000 R08: ffff88849a180920 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffa0414480 R13: 2000000000000000 R14: ffff888479beb320 R15: 2000000000000000 release_async_put_domains+0x9b/0x110 [i915] intel_display_power_put_async_work+0x91/0xf0 [i915] process_one_work+0x260/0x600 ? worker_thread+0xc9/0x380 worker_thread+0x37/0x380 ? process_one_work+0x600/0x600 kthread+0x119/0x130 ? kthread_park+0x80/0x80 ret_from_fork+0x24/0x50 ------------[ cut here ]------------ WARNING: CPU: 3 PID: 1142 at kernel/rcu/tree_plugin.h:293 rcu_note_context_switch+0x87/0x650 Modules linked in: i915(+) vgem snd_hda_codec_hdmi mei_hdcp x86_pkg_temp_thermal coretemp crct10dif_pclmul crc32_pclmul cdc_ether usbnet mii snd_intel_dspcfg ghash_clmulni_intel snd_hda_codec snd_hwdep snd_hda_core e1000e ptp mei_me snd_pcm pps_core mei intel_lpss_pci prime_numbers [last unloaded: i915] v2: - fixed handling in case of failure in drm_vblank_init() - moved i915_gem_driver_remove() call to before i915_driver_modeset_remove_noirq() this match initialization order too v3: - reverting call swap between i915_reset_error_state() and i915_gem_driver_remove() call order - improved label naming in i915_driver_modeset_probe_noirq() Closes: https://gitlab.freedesktop.org/drm/intel/issues/1647 Cc: Imre Deak Cc: Ville Syrjälä Cc: Jani Nikula Reviewed-by: Imre Deak Reviewed-by: Chris Wilson Signed-off-by: José Roberto de Souza Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200416185841.125686-1-jose.souza@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9ab4ad7ccac9..b5c4cad93a46 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -227,14 +227,14 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915) ret = drm_vblank_init(&i915->drm, INTEL_NUM_PIPES(i915)); if (ret) - goto out; + return ret; } intel_bios_init(i915); ret = intel_vga_register(i915); if (ret) - goto out; + goto cleanup_bios; intel_power_domains_init_hw(i915, false); @@ -242,13 +242,16 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915) ret = 
intel_modeset_init_noirq(i915); if (ret) - goto cleanup_vga_client; + goto cleanup_vga_client_pw_domain_csr; return 0; -cleanup_vga_client: +cleanup_vga_client_pw_domain_csr: + intel_csr_ucode_fini(i915); + intel_power_domains_driver_remove(i915); intel_vga_unregister(i915); -out: +cleanup_bios: + intel_bios_driver_remove(i915); return ret; } @@ -307,13 +310,13 @@ static void i915_driver_modeset_remove(struct drm_i915_private *i915) /* part #2: call after irq uninstall */ static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915) { - intel_modeset_driver_remove_noirq(i915); + intel_csr_ucode_fini(i915); - intel_bios_driver_remove(i915); + intel_power_domains_driver_remove(i915); intel_vga_unregister(i915); - intel_csr_ucode_fini(i915); + intel_bios_driver_remove(i915); } static void intel_init_dpio(struct drm_i915_private *dev_priv) @@ -998,7 +1001,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_cleanup_irq: intel_irq_uninstall(i915); out_cleanup_modeset: - /* FIXME */ + i915_driver_modeset_remove_noirq(i915); out_cleanup_hw: i915_driver_hw_remove(i915); intel_memory_regions_driver_release(i915); @@ -1035,12 +1038,12 @@ void i915_driver_remove(struct drm_i915_private *i915) intel_irq_uninstall(i915); - i915_driver_modeset_remove_noirq(i915); + intel_modeset_driver_remove_noirq(i915); i915_reset_error_state(i915); i915_gem_driver_remove(i915); - intel_power_domains_driver_remove(i915); + i915_driver_modeset_remove_noirq(i915); i915_driver_hw_remove(i915); From 34a3f0b273a7196b98668324556bce9f6dc091d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:49 -0700 Subject: [PATCH 020/121] drm/i915/display: Move out code to return the digital_port of the aux ch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moving the code to return the digital port of the aux channel also removing the intel_phy_is_tc() to make it generic. digital_port will be needed in icl_tc_phy_aux_power_well_enable() so adding it as a parameter to icl_tc_port_assert_ref_held(). While at at removing the duplicated call to icl_tc_phy_aux_ch() in icl_tc_port_assert_ref_held(). 
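A condensed sketch of the intended calling pattern (illustrative, pieced together from the hunks below rather than quoted verbatim): the AUX power well hooks resolve the channel to its digital port once and pass it down, and a NULL result simply means no encoder is wired to that AUX channel.

            enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
            struct intel_digital_port *dig_port =
                    aux_ch_to_digital_port(dev_priv, aux_ch);

            /* dig_port may be NULL; the assert helper warns and bails out. */
            icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);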
v2: - fixed build when DRM_I915_DEBUG_RUNTIME_PM is not set - moved to before hsw_wait_for_power_well_enable() as it will be needed by hsw_wait_for_power_well_enable() in a future patch v4: - fixed action of if (!dig_port), continue instead of return Cc: You-Sheng Yang Reviewed-by: Imre Deak Tested-by: You-Sheng Yang Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-1-jose.souza@intel.com --- .../drm/i915/display/intel_display_power.c | 69 ++++++++++--------- 1 file changed, 37 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 1d01c79fb9db..0cf5ed0254cf 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -282,6 +282,33 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); } +static struct intel_digital_port * +aux_ch_to_digital_port(struct drm_i915_private *dev_priv, + enum aux_ch aux_ch) +{ + struct intel_digital_port *dig_port = NULL; + struct intel_encoder *encoder; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + /* We'll check the MST primary port */ + if (encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + dig_port = enc_to_dig_port(encoder); + if (!dig_port) + continue; + + if (dig_port->aux_ch != aux_ch) { + dig_port = NULL; + continue; + } + + break; + } + + return dig_port; +} + static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -501,41 +528,14 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv, } static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) + struct i915_power_well *power_well, + struct intel_digital_port *dig_port) { - enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); - struct intel_digital_port *dig_port = NULL; - struct intel_encoder *encoder; - /* Bypass the check if all references are released asynchronously */ if (power_well_async_ref_count(dev_priv, power_well) == power_well->count) return; - aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); - - for_each_intel_encoder(&dev_priv->drm, encoder) { - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - - if (!intel_phy_is_tc(dev_priv, phy)) - continue; - - /* We'll check the MST primary port */ - if (encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - dig_port = enc_to_dig_port(encoder); - if (drm_WARN_ON(&dev_priv->drm, !dig_port)) - continue; - - if (dig_port->aux_ch != aux_ch) { - dig_port = NULL; - continue; - } - - break; - } - if (drm_WARN_ON(&dev_priv->drm, !dig_port)) return; @@ -545,7 +545,8 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, #else static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) + struct i915_power_well *power_well, + struct intel_digital_port *dig_port) { } @@ -558,9 +559,10 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); u32 val; - icl_tc_port_assert_ref_held(dev_priv, power_well); + icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); val &= 
~DP_AUX_CH_CTL_TBT_IO; @@ -588,7 +590,10 @@ static void icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - icl_tc_port_assert_ref_held(dev_priv, power_well); + enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); + + icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); hsw_power_well_disable(dev_priv, power_well); } From dba6b0b4eab362a886db616ee38c1d624914d98b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:50 -0700 Subject: [PATCH 021/121] drm/i915/display: Add intel_legacy_aux_to_power_domain() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a similar function to intel_aux_power_domain() but it do not care about TBT ports, this will be needed by ICL TC sequences. v2: - renamed to intel_legacy_aux_to_power_domain() Cc: Imre Deak Cc: Cooper Chiou Cc: Kai-Heng Feng Reviewed-by: Imre Deak Tested-by: You-Sheng Yang Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 14 ++++++++++++-- drivers/gpu/drm/i915/display/intel_display.h | 2 ++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 5d44f828b150..bae1d89875d6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7296,7 +7296,17 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) } } - switch (dig_port->aux_ch) { + return intel_legacy_aux_to_power_domain(dig_port->aux_ch); +} + +/* + * Converts aux_ch to power_domain without caring about TBT ports for that use + * intel_aux_power_domain() + */ +enum intel_display_power_domain +intel_legacy_aux_to_power_domain(enum aux_ch aux_ch) +{ + switch (aux_ch) { case AUX_CH_A: return POWER_DOMAIN_AUX_A; case AUX_CH_B: @@ -7312,7 +7322,7 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) case AUX_CH_G: return POWER_DOMAIN_AUX_G; default: - MISSING_CASE(dig_port->aux_ch); + MISSING_CASE(aux_ch); return POWER_DOMAIN_AUX_A; } } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index cc7f287804d7..8d872ed0de36 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -583,6 +583,8 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_port_to_power_domain(enum port port); enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port); +enum intel_display_power_domain +intel_legacy_aux_to_power_domain(enum aux_ch aux_ch); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_state *pipe_config); void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, From f8bb28e63a1e9f46fe15f63e924fab643ff8abe0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:51 -0700 Subject: [PATCH 022/121] drm/i915/display: Split hsw_power_well_enable() into two MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a preparation for ICL TC cold exit sequences. 
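The point of the split, sketched informally (this snippet mirrors the AUX well enable hook as reworked later in the series; it is not itself part of this patch): a caller can now interleave its own step between requesting the power well and waiting for the hardware to report it enabled.

            hsw_power_well_enable_prepare(dev_priv, power_well);

            /*
             * Platform-specific work while the enable request is pending,
             * e.g. the ICL TC cold exit added by a following patch.
             */
            if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
                    icl_tc_cold_exit(dev_priv);

            hsw_power_well_enable_complete(dev_priv, power_well);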
v2: - renamed new functions to hsw_power_well_enable_prepare()/complete() Signed-off-by: José Roberto de Souza Reviewed-by: Imre Deak Tested-by: You-Sheng Yang Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-3-jose.souza@intel.com --- .../drm/i915/display/intel_display_power.c | 39 +++++++++++++++---- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 0cf5ed0254cf..606a1659021b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -380,16 +380,16 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, SKL_FUSE_PG_DIST_STATUS(pg), 1)); } -static void hsw_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) +static void hsw_power_well_enable_prepare(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; - bool wait_fuses = power_well->desc->hsw.has_fuses; - enum skl_power_gate uninitialized_var(pg); u32 val; - if (wait_fuses) { + if (power_well->desc->hsw.has_fuses) { + enum skl_power_gate pg; + pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : SKL_PW_CTL_IDX_TO_PG(pw_idx); /* @@ -406,25 +406,46 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, regs->driver); intel_de_write(dev_priv, regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); +} + +static void hsw_power_well_enable_complete(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->hsw.idx; + hsw_wait_for_power_well_enable(dev_priv, power_well); /* Display WA #1178: cnl */ if (IS_CANNONLAKE(dev_priv) && pw_idx >= GLK_PW_CTL_IDX_AUX_B && pw_idx <= CNL_PW_CTL_IDX_AUX_F) { + u32 val; + val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx)); val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val); } - if (wait_fuses) + if (power_well->desc->hsw.has_fuses) { + enum skl_power_gate pg; + + pg = INTEL_GEN(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); gen9_wait_for_power_well_fuses(dev_priv, pg); + } hsw_power_well_post_enable(dev_priv, power_well->desc->hsw.irq_pipe_mask, power_well->desc->hsw.has_vga); } +static void hsw_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + hsw_power_well_enable_prepare(dev_priv, power_well); + hsw_power_well_enable_complete(dev_priv, power_well); +} + static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -570,7 +591,11 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, val |= DP_AUX_CH_CTL_TBT_IO; intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); - hsw_power_well_enable(dev_priv, power_well); + hsw_power_well_enable_prepare(dev_priv, power_well); + + /* TODO ICL TC cold handling */ + + hsw_power_well_enable_complete(dev_priv, power_well); if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) { enum tc_port tc_port; From feb7e0ef5ff820ee7242bb46cfe3d0dd3e234c38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:52 -0700 Subject: [PATCH 023/121] drm/i915/tc/icl: Implement TC cold sequences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is required for legacy/static TC ports as IOM is not aware of the connection and will not trigger the TC cold exit. Just request PCODE to exit TCCOLD is not enough as it could enter again before driver makes use of the port, to prevent it BSpec states that aux powerwell should be held. So here embedding the TC cold exit sequence into ICL aux enable, it will enable aux and then request TC cold to exit. The TC cold block(exit and aux hold) and unblock was added to some exported TC functions for the others and to access PHY registers, callers should enable and keep aux powerwell enabled during access. Also adding TC cold check and warnig in tc_port_load_fia_params() as at this point of the driver initialization we can't request power wells, if we get this warning we will need to figure out how to handle it. 
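The resulting usage pattern, condensed for clarity from the intel_tc.c hunks below (an illustrative framing, not a verbatim quote): every FIA/PHY access on an ICL legacy TC port is bracketed by the new helpers, so the AUX power well pins the port out of TCCOLD for the duration of the access.

            intel_wakeref_t tc_cold_wref;

            tc_cold_wref = tc_cold_block(dig_port);  /* get AUX domain, TCCOLD exits */

            /* ... FIA/PHY register access is safe here ... */

            tc_cold_unblock(dig_port, tc_cold_wref); /* async put, TCCOLD may re-enter */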
v2: - moved ICL TC cold exit function to intel_display_power - using dig_port->tc_legacy_port to only execute sequences for legacy ports, hopefully VBTs will have this right - fixed check to call _hsw_power_well_continue_enable() - calling _hsw_power_well_continue_enable() unconditionally in icl_tc_phy_aux_power_well_enable(), if needed we will surpress timeout warnings of TC legacy ports - only blocking TC cold around fia access v3: - added timeout of 5msec to not loop forever if sandybridge_pcode_write_timeout() keeps returning -EAGAIN returning -EAGAIN in in icl_tc_cold_exit() - removed leftover tc_cold_wakeref - added one msec sleep when PCODE returns -EAGAIN v4: - removed 5msec timeout, instead giving 1msec to whoever is using PCODE to finish it up to 3 times - added a comment about turn TC cold exit failure as a error in future BSpec: 21750 Closes: https://gitlab.freedesktop.org/drm/intel/issues/1296 Cc: Imre Deak Cc: Cooper Chiou Cc: Kai-Heng Feng Reviewed-by: Imre Deak Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-4-jose.souza@intel.com --- .../drm/i915/display/intel_display_power.c | 25 +++++++- drivers/gpu/drm/i915/display/intel_tc.c | 64 +++++++++++++++++-- drivers/gpu/drm/i915/i915_reg.h | 1 + 3 files changed, 83 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 606a1659021b..c4043d665645 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -575,6 +575,28 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) +static void icl_tc_cold_exit(struct drm_i915_private *i915) +{ + int ret, tries = 0; + + while (1) { + ret = sandybridge_pcode_write_timeout(i915, + ICL_PCODE_EXIT_TCCOLD, + 0, 250, 1); + if (ret != -EAGAIN || ++tries == 3) + break; + msleep(1); + } + + /* Spec states that TC cold exit can take up to 1ms to complete */ + if (!ret) + msleep(1); + + /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ + drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? 
"failed" : + "succeeded"); +} + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) @@ -593,7 +615,8 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, hsw_power_well_enable_prepare(dev_priv, power_well); - /* TODO ICL TC cold handling */ + if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) + icl_tc_cold_exit(dev_priv); hsw_power_well_enable_complete(dev_priv, power_well); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 275618bedf32..0cf33d4d21c3 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -34,6 +34,7 @@ tc_port_load_fia_params(struct drm_i915_private *i915, if (INTEL_INFO(i915)->display.has_modular_fia) { modular_fia = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1)); + drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff); modular_fia &= MODULAR_FIA_MASK; } else { modular_fia = 0; @@ -52,6 +53,37 @@ tc_port_load_fia_params(struct drm_i915_private *i915, } } +static intel_wakeref_t +tc_cold_block(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum intel_display_power_domain domain; + + if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) + return 0; + + domain = intel_legacy_aux_to_power_domain(dig_port->aux_ch); + return intel_display_power_get(i915, domain); +} + +static void +tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum intel_display_power_domain domain; + + /* + * wakeref == -1, means some error happened saving save_depot_stack but + * power should still be put down and 0 is a invalid save_depot_stack + * id so can be used to skip it for non TC legacy ports. 
+ */ + if (wakeref == 0) + return; + + domain = intel_legacy_aux_to_power_domain(dig_port->aux_ch); + intel_display_power_put_async(i915, domain, wakeref); +} + u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -420,9 +452,14 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, enum tc_port_mode old_tc_mode = dig_port->tc_mode; intel_display_power_flush_work(i915); - drm_WARN_ON(&i915->drm, - intel_display_power_is_enabled(i915, - intel_aux_power_domain(dig_port))); + if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) { + enum intel_display_power_domain aux_domain; + bool aux_powered; + + aux_domain = intel_aux_power_domain(dig_port); + aux_powered = intel_display_power_is_enabled(i915, aux_domain); + drm_WARN_ON(&i915->drm, aux_powered); + } icl_tc_phy_disconnect(dig_port); icl_tc_phy_connect(dig_port, required_lanes); @@ -445,9 +482,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_encoder *encoder = &dig_port->base; + intel_wakeref_t tc_cold_wref; int active_links = 0; mutex_lock(&dig_port->tc_lock); + tc_cold_wref = tc_cold_block(dig_port); dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); if (dig_port->dp.is_mst) @@ -473,6 +512,7 @@ out: dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); + tc_cold_unblock(dig_port, tc_cold_wref); mutex_unlock(&dig_port->tc_lock); } @@ -494,10 +534,15 @@ static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) bool intel_tc_port_connected(struct intel_digital_port *dig_port) { bool is_connected; + intel_wakeref_t tc_cold_wref; intel_tc_port_lock(dig_port); + tc_cold_wref = tc_cold_block(dig_port); + is_connected = tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); + + tc_cold_unblock(dig_port, tc_cold_wref); intel_tc_port_unlock(dig_port); return is_connected; @@ -513,9 +558,16 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port, mutex_lock(&dig_port->tc_lock); - if (!dig_port->tc_link_refcount && - intel_tc_port_needs_reset(dig_port)) - intel_tc_port_reset_mode(dig_port, required_lanes); + if (!dig_port->tc_link_refcount) { + intel_wakeref_t tc_cold_wref; + + tc_cold_wref = tc_cold_block(dig_port); + + if (intel_tc_port_needs_reset(dig_port)) + intel_tc_port_reset_mode(dig_port, required_lanes); + + tc_cold_unblock(dig_port, tc_cold_wref); + } drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); dig_port->tc_lock_wakeref = wakeref; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a24e23e9b3d0..ab4dafb163a2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9108,6 +9108,7 @@ enum { #define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8)) #define GEN6_PCODE_READ_D_COMP 0x10 #define GEN6_PCODE_WRITE_D_COMP 0x11 +#define ICL_PCODE_EXIT_TCCOLD 0x12 #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 #define DISPLAY_IPS_CONTROL 0x19 /* See also IPS_CTL */ From 7ce40a671538c225ea8eb170830cad90b16ec211 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:53 -0700 Subject: [PATCH 024/121] drm/i915/tc: Skip ref held check for TC legacy aux power wells MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As part of ICL TC cold exit sequences we need to request aux power well before lock the access to TC 
ports, so skiping the intel_tc_port_ref_held() check for TC legacy ports. Reviewed-by: Imre Deak Tested-by: You-Sheng Yang Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-5-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c4043d665645..3a4a378792f2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -560,6 +560,9 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, if (drm_WARN_ON(&dev_priv->drm, !dig_port)) return; + if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) + return; + drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); } From 3c02934b24e37344b8c1c6f9df55efc8891e6251 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:54 -0700 Subject: [PATCH 025/121] drm/i915/tc/tgl: Implement TC cold sequences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TC ports can enter in TCCOLD to save power and is required to request to PCODE to exit this state before use or read to TC registers. For TGL there is a new MBOX command to do that with a parameter to ask PCODE to exit and block TCCOLD entry or unblock TCCOLD entry. So adding a new power domain to reuse the refcount and only allow TC cold when all TC ports are not in use. v2: - fixed missing case in intel_display_power_domain_str() - moved tgl_tc_cold_request to intel_display_power.c - renamed TGL_TC_COLD_OFF to TGL_TC_COLD_OFF_POWER_DOMAINS - added all TC and TBT aux power domains to TGL_TC_COLD_OFF_POWER_DOMAINS v3: - added one msec sleep when PCODE returns -EAGAIN - added timeout of 5msec to not loop forever if sandybridge_pcode_write_timeout() keeps returning -EAGAIN v4: - Made failure to block or unblock TC cold a error - removed 5msec timeout, instead giving PCODE 1msec by up 3 times to recover from the internal error v5: - only sleeping 1msec when ret is -EAGAIN BSpec: 49294 Cc: Imre Deak Cc: Cooper Chiou Cc: Kai-Heng Feng Reviewed-by: Imre Deak Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-6-jose.souza@intel.com --- .../drm/i915/display/intel_display_power.c | 108 ++++++++++++++++++ .../drm/i915/display/intel_display_power.h | 1 + drivers/gpu/drm/i915/display/intel_tc.c | 17 ++- drivers/gpu/drm/i915/i915_reg.h | 4 + 4 files changed, 127 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 3a4a378792f2..ff76a5fdbdbe 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -151,6 +151,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "GT_IRQ"; case POWER_DOMAIN_DPLL_DC_OFF: return "DPLL_DC_OFF"; + case POWER_DOMAIN_TC_COLD_OFF: + return "TC_COLD_OFF"; default: MISSING_CASE(domain); return "?"; @@ -2861,6 +2863,21 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) +#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + 
BIT_ULL(POWER_DOMAIN_AUX_G) | \ + BIT_ULL(POWER_DOMAIN_AUX_H) | \ + BIT_ULL(POWER_DOMAIN_AUX_I) | \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ + BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) + static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = i9xx_always_on_power_well_noop, @@ -3963,6 +3980,91 @@ static const struct i915_power_well_desc ehl_power_wells[] = { }, }; +static void +tgl_tc_cold_request(struct drm_i915_private *i915, bool block) +{ + u8 tries = 0; + int ret; + + while (1) { + u32 low_val = 0, high_val; + + if (block) + high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ; + else + high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ; + + /* + * Spec states that we should timeout the request after 200us + * but the function below will timeout after 500us + */ + ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, + &high_val); + if (ret == 0) { + if (block && + (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) + ret = -EIO; + else + break; + } + + if (++tries == 3) + break; + + if (ret == -EAGAIN) + msleep(1); + } + + if (ret) + drm_err(&i915->drm, "TC cold %sblock failed\n", + block ? "" : "un"); + else + drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", + block ? "" : "un"); +} + +static void +tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + tgl_tc_cold_request(i915, true); +} + +static void +tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + tgl_tc_cold_request(i915, false); +} + +static void +tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + if (power_well->count > 0) + tgl_tc_cold_off_power_well_enable(i915, power_well); + else + tgl_tc_cold_off_power_well_disable(i915, power_well); +} + +static bool +tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + /* + * Not the correctly implementation but there is no way to just read it + * from PCODE, so returning count to avoid state mismatch errors + */ + return power_well->count; +} + +static const struct i915_power_well_ops tgl_tc_cold_off_ops = { + .sync_hw = tgl_tc_cold_off_power_well_sync_hw, + .enable = tgl_tc_cold_off_power_well_enable, + .disable = tgl_tc_cold_off_power_well_disable, + .is_enabled = tgl_tc_cold_off_power_well_is_enabled, +}; + static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "always-on", @@ -4290,6 +4392,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.irq_pipe_mask = BIT(PIPE_D), }, }, + { + .name = "TC cold off", + .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, + .ops = &tgl_tc_cold_off_ops, + .id = DISP_PW_ID_NONE, + }, }; static int diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 14c5ad20287f..6c917699293b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -76,6 +76,7 @@ enum intel_display_power_domain { POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, POWER_DOMAIN_DPLL_DC_OFF, + POWER_DOMAIN_TC_COLD_OFF, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c 
b/drivers/gpu/drm/i915/display/intel_tc.c index 0cf33d4d21c3..521a94c63640 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -53,16 +53,27 @@ tc_port_load_fia_params(struct drm_i915_private *i915, } } +static enum intel_display_power_domain +tc_cold_get_power_domain(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + if (INTEL_GEN(i915) == 11) + return intel_legacy_aux_to_power_domain(dig_port->aux_ch); + else + return POWER_DOMAIN_TC_COLD_OFF; +} + static intel_wakeref_t tc_cold_block(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum intel_display_power_domain domain; - if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) + if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port) return 0; - domain = intel_legacy_aux_to_power_domain(dig_port->aux_ch); + domain = tc_cold_get_power_domain(dig_port); return intel_display_power_get(i915, domain); } @@ -80,7 +91,7 @@ tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref) if (wakeref == 0) return; - domain = intel_legacy_aux_to_power_domain(dig_port->aux_ch); + domain = tc_cold_get_power_domain(dig_port); intel_display_power_put_async(i915, domain, wakeref); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ab4dafb163a2..9e192629d5e5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9111,6 +9111,10 @@ enum { #define ICL_PCODE_EXIT_TCCOLD 0x12 #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 #define DISPLAY_IPS_CONTROL 0x19 +#define TGL_PCODE_TCCOLD 0x26 +#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0) +#define TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ 0 +#define TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ REG_BIT(0) /* See also IPS_CTL */ #define IPS_PCODE_CONTROL (1 << 30) #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A From 3ed347d1a73ea485ea9f68df64e95cc164e1a790 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:55 -0700 Subject: [PATCH 026/121] drm/i915/tc: Catch TC users accessing FIA registers without enable aux MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As described in "drm/i915/tc/icl: Implement TC cold sequences" users of TC functions should held aux power well during access to avoid read garbage due HW in TC cold state. 
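Concretely, the "garbage" is FIA registers reading back as all ones while the hardware sits in TC cold; a short sketch of how the restored raw-value check and the new assertion complement each other in intel_tc_port_get_lane_mask() (condensed from the hunk below, not quoted verbatim):

            u32 lane_mask = intel_uncore_read(uncore,
                                              PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

            drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff); /* TCCOLD garbage? */
            assert_tc_cold_blocked(dig_port);                 /* domain actually held? */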
v3: - renamed is_tc_cold_blocked() to assert_tc_cold_blocked() - restored the removed 0xffffffff checks Reviewed-by: Imre Deak Tested-by: You-Sheng Yang Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-7-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 521a94c63640..d3bd5e798fbc 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -95,6 +95,20 @@ tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref) intel_display_power_put_async(i915, domain, wakeref); } +static void +assert_tc_cold_blocked(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + bool enabled; + + if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port) + return; + + enabled = intel_display_power_is_enabled(i915, + tc_cold_get_power_domain(dig_port)); + drm_WARN_ON(&i915->drm, !enabled); +} + u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -105,6 +119,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff); + assert_tc_cold_blocked(dig_port); lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx); return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); @@ -120,6 +135,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) PORT_TX_DFLEXPA1(dig_port->tc_phy_fia)); drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff); + assert_tc_cold_blocked(dig_port); return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >> DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); @@ -134,6 +150,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) if (dig_port->tc_mode != TC_PORT_DP_ALT) return 4; + assert_tc_cold_blocked(dig_port); + lane_mask = 0; with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) lane_mask = intel_tc_port_get_lane_mask(dig_port); @@ -166,6 +184,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, drm_WARN_ON(&i915->drm, lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); + assert_tc_cold_blocked(dig_port); + val = intel_uncore_read(uncore, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx); From 0f8925090ac7ad451ed9e803662ebe3214dc8072 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 12:49:56 -0700 Subject: [PATCH 027/121] drm/i915/tc: Do not warn when aux power well of static TC ports timeout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a expected timeout of static TC ports not conneceted, so not throwing warnings that would taint CI. 
v3: - moved checks to tc_phy_aux_timeout_expected() v4: - moved and add comments to tc_phy_aux_timeout_expected() v5: - only checking tc_legacy_port for TC ports Signed-off-by: José Roberto de Souza Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20200414194956.164323-8-jose.souza@intel.com --- .../drm/i915/display/intel_display_power.c | 59 +++++++++++++------ 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index ff76a5fdbdbe..f72b8ed095ca 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -20,6 +20,8 @@ #include "intel_tc.h" #include "intel_vga.h" +static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops; + bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -284,6 +286,21 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); } +#define ICL_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) + +#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) + +static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->hsw.idx; + + return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); +} + static struct intel_digital_port * aux_ch_to_digital_port(struct drm_i915_private *dev_priv, enum aux_ch aux_ch) @@ -311,6 +328,28 @@ aux_ch_to_digital_port(struct drm_i915_private *dev_priv, return dig_port; } +static bool tc_phy_aux_timeout_expected(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + /* An AUX timeout is expected if the TBT DP tunnel is down. */ + if (power_well->desc->hsw.is_tc_tbt) + return true; + + /* + * An AUX timeout is expected because we enable TC legacy port aux + * to hold port out of TC cold + */ + if (INTEL_GEN(dev_priv) == 11 && + power_well->desc->ops == &icl_tc_phy_aux_power_well_ops) { + enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); + + return dig_port->tc_legacy_port; + } + + return false; +} + static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -323,8 +362,9 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", power_well->desc->name); - /* An AUX timeout is expected if the TBT DP tunnel is down. */ - drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt); + drm_WARN_ON(&dev_priv->drm, + !tc_phy_aux_timeout_expected(dev_priv, power_well)); + } } @@ -520,21 +560,6 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_disable(dev_priv, power_well); } -#define ICL_AUX_PW_TO_CH(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) - -#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) - -static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - int pw_idx = power_well->desc->hsw.idx; - - return power_well->desc->hsw.is_tc_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : - ICL_AUX_PW_TO_CH(pw_idx); -} - #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) static u64 async_put_domains_mask(struct i915_power_domains *power_domains); From edcb9028d66b44d74ba4f8b9daa379b004dc1f85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Tue, 14 Apr 2020 16:04:40 -0700 Subject: [PATCH 028/121] drm/i915/display: Load DP_TP_CTL/STATUS offset before use it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Right now dp.regs.dp_tp_ctl/status are only set during the encoder pre_enable() hook, which causes all reads and writes to those registers to go to offset 0x0 before pre_enable() is executed. So if i915 takes over the BIOS state and does not do a modeset, any following link retraining will fail. In the case that i915 does need a modeset, the DDI disable sequence will write to the wrong register and fail to clear DP 'Transport Enable' in DP_TP_CTL, so a subsequent HDMI modeset on the same port/transcoder will not light up the monitor. So for GENs older than 12, which have those registers at fixed per-port offsets, load them at encoder/port init, while for GEN12 keep setting them at encoder pre_enable() and during HW state readout. Fixes: 4444df6e205b ("drm/i915/tgl: move DP_TP_* to transcoder") Cc: Matt Roper Cc: Lucas De Marchi Signed-off-by: José Roberto de Souza Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20200414230442.262092-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 14 +++++++++++--- drivers/gpu/drm/i915/display/intel_dp.c | 5 ++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index be6c61bcbc9c..1aab93a94f40 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3252,9 +3252,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); - intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); - intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); - intel_edp_panel_on(intel_dp); intel_ddi_clk_select(encoder, crtc_state); @@ -4061,12 +4058,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 temp, flags = 0; /* XXX: DSI transcoder paranoia */ if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) return; + if (INTEL_GEN(dev_priv) >= 12) { + intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder); + intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder); + } + intel_dsc_get_config(encoder, pipe_config); temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); @@ -4396,6 +4399,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { static struct intel_connector * intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) { + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); struct intel_connector *connector; enum port port = intel_dig_port->base.port; @@ -4406,6 +4410,10 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); intel_dig_port->dp.prepare_link_retrain =
intel_ddi_prepare_link_retrain; + if (INTEL_GEN(dev_priv) < 12) { + intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); + } if (!intel_dp_init_connector(intel_dig_port, connector)) { kfree(connector); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 48397b2c08cf..857a36ff53a5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2670,9 +2670,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)); - intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); - intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); - /* * There are four kinds of DP registers: * @@ -8467,6 +8464,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; + intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); intel_encoder->type = INTEL_OUTPUT_DP; intel_encoder->power_domain = intel_port_to_power_domain(port); From 65bb9dd0ec7966880b68b252b9a71585f0b539e8 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Fri, 17 Apr 2020 22:58:35 +0530 Subject: [PATCH 029/121] drm/i915: Add ICL PG3 PW ID for EHL Gen11 onwards PG3 contains functions for pipe B, external displays, and VGA. Add missing ICL_DISP_PW_3 for ehl_power_wells. Cc: Animesh Manna Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1737 Signed-off-by: Anshuman Gupta Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200417172835.15461-1-anshuman.gupta@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index f72b8ed095ca..fa46ea6c6e01 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -3902,7 +3902,7 @@ static const struct i915_power_well_desc ehl_power_wells[] = { .name = "power well 3", .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = ICL_DISP_PW_3, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_3, From 7479f3c90a6d6f0a8c9b8a53b3856ac1002a8587 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 17 Apr 2020 17:08:29 +0100 Subject: [PATCH 030/121] drm/i915: remove redundant assignment to variable test_result The variable test_result is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. 
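For illustration, a minimal standalone example (not the driver code) of the dead-store pattern Coverity flags here: the initializer is overwritten before it is ever read, so it has no effect and can be dropped.

#include <stdint.h>

#define TEST_NAK 0x2a	/* placeholder standing in for DP_TEST_NAK */

static uint8_t prepare_test(void)
{
	return 0x01;	/* placeholder for intel_dp_prepare_phytest() */
}

uint8_t run_autotest(void)
{
	uint8_t test_result = TEST_NAK;	/* dead store: never read */

	test_result = prepare_test();	/* unconditionally overwrites it */
	return test_result;
}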
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200417160829.112776-1-colin.king@canonical.com --- drivers/gpu/drm/i915/display/intel_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 857a36ff53a5..a71d29136595 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5563,7 +5563,7 @@ void intel_dp_process_phy_request(struct intel_dp *intel_dp) static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) { - u8 test_result = DP_TEST_NAK; + u8 test_result; test_result = intel_dp_prepare_phytest(intel_dp); if (test_result != DP_TEST_ACK) From 31a02eb70b8d9e68c848f2543fa8e745073363e2 Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 17 Apr 2020 15:51:07 -0400 Subject: [PATCH 031/121] drm/i915: Refactor setting dma info to a common helper DMA_MASK bit values are different for different generations. This will become more difficult to manage over time with the open-coded usage spread across the different versions of the device. Fix this by disallowing the setting of the DMA mask in the AGP path (< GEN(5)) for i915, adding dma_mask_size to the device info configuration, updating the open-coded call sequence to the latest interface, and refactoring the DMA segment and mask setup into a common function. Reviewed-by: Chris Wilson Signed-off-by: Michael J. Ruhl cc: Brian Welty cc: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200417195107.68732-1-michael.j.ruhl@intel.com --- drivers/char/agp/intel-gtt.c | 17 +++-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 15 ---- drivers/gpu/drm/i915/i915_drv.c | 94 +++++++++++++++--------- drivers/gpu/drm/i915/i915_pci.c | 14 ++++ drivers/gpu/drm/i915/intel_device_info.c | 1 + drivers/gpu/drm/i915/intel_device_info.h | 2 + 6 files changed, 87 insertions(+), 56 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 3d42fc4290bc..4b34a5195c65 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1407,13 +1407,16 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); - mask = intel_private.driver->dma_mask_size; - if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) - dev_err(&intel_private.pcidev->dev, - "set gfx device dma mask %d-bit failed!\n", mask); - else - pci_set_consistent_dma_mask(intel_private.pcidev, - DMA_BIT_MASK(mask)); + if (bridge) { + mask = intel_private.driver->dma_mask_size; + if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) + dev_err(&intel_private.pcidev->dev, + "set gfx device dma mask %d-bit failed!\n", + mask); + else + pci_set_consistent_dma_mask(intel_private.pcidev, + DMA_BIT_MASK(mask)); + } if (intel_gtt_init() != 0) { intel_gmch_remove(); diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index eebd1190506f..66165b10256e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -840,7 +840,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) struct pci_dev *pdev = i915->drm.pdev; unsigned int size; u16 snb_gmch_ctl; - int err; /* TODO: We're not aware of mappable constraints on gen8 yet */ if (!IS_DGFX(i915)) { @@ -848,13 +847,6 @@ static int
gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->mappable_end = resource_size(&ggtt->gmadr); } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); - if (err) - drm_err(&i915->drm, - "Can't set DMA mask/consistent mask (%d)\n", err); - pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); if (IS_CHERRYVIEW(i915)) size = chv_get_total_gtt_size(snb_gmch_ctl); @@ -990,7 +982,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) struct pci_dev *pdev = i915->drm.pdev; unsigned int size; u16 snb_gmch_ctl; - int err; ggtt->gmadr = pci_resource(pdev, 2); ggtt->mappable_end = resource_size(&ggtt->gmadr); @@ -1005,12 +996,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) return -ENXIO; } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); - if (err) - drm_err(&i915->drm, - "Can't set DMA mask/consistent mask (%d)\n", err); pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); size = gen6_get_total_gtt_size(snb_gmch_ctl); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b5c4cad93a46..c08b165a9cb4 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -568,6 +568,62 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) intel_gvt_sanitize_options(dev_priv); } +/** + * i915_set_dma_info - set all relevant PCI dma info as configured for the + * platform + * @i915: valid i915 instance + * + * Set the dma max segment size, device and coherent masks. The dma mask set + * needs to occur before i915_ggtt_probe_hw. + * + * A couple of platforms have special needs. Address them as well. + * + */ +static int i915_set_dma_info(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size; + int ret; + + GEM_BUG_ON(!mask_size); + + /* + * We don't have a max segment size, so set it to the max so sg's + * debugging layer doesn't complain + */ + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); + if (ret) + goto mask_err; + + /* overlay on gen2 is broken and can't address above 1G */ + if (IS_GEN(i915, 2)) + mask_size = 30; + + /* + * 965GM sometimes incorrectly writes to hardware status page (HWS) + * using 32bit addressing, overwriting memory if HWS is located + * above 4GB. + * + * The documentation also mentions an issue with undefined + * behaviour if any general state is accessed within a page above 4GB, + * which also needs to be handled carefully. 
+ */ + if (IS_I965G(i915) || IS_I965GM(i915)) + mask_size = 32; + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); + if (ret) + goto mask_err; + + return 0; + +mask_err: + drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret); + return ret; +} + /** * i915_driver_hw_probe - setup state requiring device access * @dev_priv: device private @@ -613,6 +669,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) /* needs to be done before ggtt probe */ intel_dram_edram_detect(dev_priv); + ret = i915_set_dma_info(dev_priv); + if (ret) + return ret; + i915_perf_init(dev_priv); ret = i915_ggtt_probe_hw(dev_priv); @@ -641,40 +701,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) pci_set_master(pdev); - /* - * We don't have a max segment size, so set it to the max so sg's - * debugging layer doesn't complain - */ - dma_set_max_seg_size(&pdev->dev, UINT_MAX); - - /* overlay on gen2 is broken and can't address above 1G */ - if (IS_GEN(dev_priv, 2)) { - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); - if (ret) { - drm_err(&dev_priv->drm, "failed to set DMA mask\n"); - - goto err_mem_regions; - } - } - - /* 965GM sometimes incorrectly writes to hardware status page (HWS) - * using 32bit addressing, overwriting memory if HWS is located - * above 4GB. - * - * The documentation also mentions an issue with undefined - * behaviour if any general state is accessed within a page above 4GB, - * which also needs to be handled carefully. - */ - if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) { - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - - if (ret) { - drm_err(&dev_priv->drm, "failed to set DMA mask\n"); - - goto err_mem_regions; - } - } - cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); intel_gt_init_workarounds(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 66738f2c4f28..2fc25ec12c3d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -171,6 +171,7 @@ .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ + .dma_mask_size = 32, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I9XX_COLORS, \ @@ -190,6 +191,7 @@ .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ + .dma_mask_size = 32, \ I845_PIPE_OFFSETS, \ I845_CURSOR_OFFSETS, \ I9XX_COLORS, \ @@ -226,6 +228,7 @@ static const struct intel_device_info i865g_info = { .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ + .dma_mask_size = 32, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I9XX_COLORS, \ @@ -286,6 +289,7 @@ static const struct intel_device_info g33_info = { PLATFORM(INTEL_G33), .display.has_hotplug = 1, .display.has_overlay = 1, + .dma_mask_size = 36, }; static const struct intel_device_info pnv_g_info = { @@ -293,6 +297,7 @@ static const struct intel_device_info pnv_g_info = { PLATFORM(INTEL_PINEVIEW), .display.has_hotplug = 1, .display.has_overlay = 1, + .dma_mask_size = 36, }; static const struct intel_device_info pnv_m_info = { @@ -301,6 +306,7 @@ static const struct intel_device_info pnv_m_info = { .is_mobile = 1, .display.has_hotplug = 1, .display.has_overlay = 1, + .dma_mask_size = 36, }; #define GEN4_FEATURES \ @@ -313,6 +319,7 @@ static const struct intel_device_info pnv_m_info = { .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ + .dma_mask_size = 36, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I965_COLORS, \ @@ -365,6 +372,7 @@ static 
const struct intel_device_info gm45_info = { .has_coherent_ggtt = true, \ /* ilk does support rc6, but we do not implement [power] contexts */ \ .has_rc6 = 0, \ + .dma_mask_size = 36, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ ILK_COLORS, \ @@ -395,6 +403,7 @@ static const struct intel_device_info ilk_m_info = { .has_rc6 = 1, \ .has_rc6p = 1, \ .has_rps = true, \ + .dma_mask_size = 40, \ .ppgtt_type = INTEL_PPGTT_ALIASING, \ .ppgtt_size = 31, \ I9XX_PIPE_OFFSETS, \ @@ -445,6 +454,7 @@ static const struct intel_device_info snb_m_gt2_info = { .has_rc6 = 1, \ .has_rc6p = 1, \ .has_rps = true, \ + .dma_mask_size = 40, \ .ppgtt_type = INTEL_PPGTT_ALIASING, \ .ppgtt_size = 31, \ IVB_PIPE_OFFSETS, \ @@ -504,6 +514,7 @@ static const struct intel_device_info vlv_info = { .has_rps = true, .display.has_gmch = 1, .display.has_hotplug = 1, + .dma_mask_size = 40, .ppgtt_type = INTEL_PPGTT_ALIASING, .ppgtt_size = 31, .has_snoop = true, @@ -554,6 +565,7 @@ static const struct intel_device_info hsw_gt3_info = { G75_FEATURES, \ GEN(8), \ .has_logical_ring_contexts = 1, \ + .dma_mask_size = 39, \ .ppgtt_type = INTEL_PPGTT_FULL, \ .ppgtt_size = 48, \ .has_64bit_reloc = 1, \ @@ -602,6 +614,7 @@ static const struct intel_device_info chv_info = { .has_rps = true, .has_logical_ring_contexts = 1, .display.has_gmch = 1, + .dma_mask_size = 39, .ppgtt_type = INTEL_PPGTT_ALIASING, .ppgtt_size = 32, .has_reset_engine = 1, @@ -685,6 +698,7 @@ static const struct intel_device_info skl_gt4_info = { .has_logical_ring_contexts = 1, \ .has_logical_ring_preemption = 1, \ .has_gt_uc = 1, \ + .dma_mask_size = 39, \ .ppgtt_type = INTEL_PPGTT_FULL, \ .ppgtt_size = 48, \ .has_reset_engine = 1, \ diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index db8496b4c38d..91bb7891c70c 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -98,6 +98,7 @@ void intel_device_info_print_static(const struct intel_device_info *info, drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size); drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type); + drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size); #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index cce6a72c5ebc..69c9257c6c6a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -158,6 +158,8 @@ struct intel_device_info { enum intel_platform platform; + unsigned int dma_mask_size; /* available DMA address bits */ + enum intel_ppgtt_type ppgtt_type; unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */ From 27be41de45a70fe1cb0ae1cbd2fd6da1ce3ffe9a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 17 Apr 2020 16:01:09 +0300 Subject: [PATCH 032/121] drm/i915: fix Sphinx build duplicate label warning Fix the warning caused by enabling the autosectionlabel extension in the kernel Sphinx build: Documentation/gpu/i915.rst:610: WARNING: duplicate label gpu/i915:layout, other instance in Documentation/gpu/i915.rst The autosectionlabel extension adds labels to each section title for cross-referencing, but forbids identical section titles in a document. With kernel-doc, this includes sections titles in the included kernel-doc comments. 
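As an illustration of the collision (simplified; the DOC titles below are placeholders, only the file names, the "Layout" sections and the label come from the patch), two kernel-doc comments pulled into the same .rst page can clash like this:

/* gt/intel_workarounds.c */
/**
 * DOC: placeholder title A
 *
 * Layout
 * ~~~~~~
 *
 * ...
 */

/* i915_reg.h */
/**
 * DOC: placeholder title B
 *
 * Layout
 * ~~~~~~
 *
 * ...
 */

Both comments are included by Documentation/gpu/i915.rst, so autosectionlabel tries to create the label gpu/i915:layout twice and emits the duplicate-label warning.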
In the warning message, Sphinx is unable to reference the labels in their true locations in the kernel-doc comments in source. In this case, there's "Layout" sections in both gt/intel_workarounds.c and i915_reg.h. Rename the section in the latter to "File Layout". Fixes: 58ad30cf91f0 ("docs: fix reference to core-api/namespaces.rst") Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200417130109.12791-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9e192629d5e5..69332be5ac41 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -34,8 +34,8 @@ * Follow the style described here for new macros, and while changing existing * macros. Do **not** mass change existing definitions just to update the style. * - * Layout - * ~~~~~~ + * File Layout + * ~~~~~~~~~~~ * * Keep helper macros near the top. For example, _PIPE() and friends. * From b4ed131dbfb6d345e1e5f7cc65358e11ab46f10d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 17 Apr 2020 09:51:32 +0300 Subject: [PATCH 033/121] drm/i915/audio: error log non-zero audio power refcount after unbind We have some module unload/reload tests hitting an issue with i915 unbinding the component interface before the audio driver has properly put the power. Log an error about it for ease of debugging. (Normally this leads to a wakeref debug splat on the power well.) Cc: Kai Vehmanen Reviewed-by: Kai Vehmanen Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200417065132.23048-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_audio.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index dc311bb227f1..2663e71059af 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -1284,6 +1284,10 @@ static void i915_audio_component_unbind(struct device *i915_kdev, drm_modeset_unlock_all(&dev_priv->drm); device_link_remove(hda_kdev, i915_kdev); + + if (dev_priv->audio_power_refcount) + drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n", + dev_priv->audio_power_refcount); } static const struct component_ops i915_audio_component_bind_ops = { From d7fb38ae36a2dc97924b075ad1d1a88792777ea9 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Mon, 13 Apr 2020 17:44:06 -0400 Subject: [PATCH 034/121] drm/i915/dpcd_bl: Unbreak enable_dpcd_backlight modparam MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Looks like I accidentally made it so you couldn't force DPCD backlight support on, whoops. Fix that. 
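In sketch form, the check after this fix (a hypothetical helper; the real logic is the condition in intel_dp_aux_init_backlight_funcs() shown in the hunk below) allows DPCD backlight control when the VBT asks for it, when a known panel quirk asks for it, or when the user forces it with enable_dpcd_backlight=1, the case that had been broken:

#include <stdbool.h>

static bool dpcd_backlight_wanted(bool vbt_vesa_aux_iface,
				  bool panel_force_quirk,
				  int enable_dpcd_backlight)
{
	/* Mirrors the bail-out condition added by the patch below. */
	if (!vbt_vesa_aux_iface &&
	    enable_dpcd_backlight != 1 &&
	    !panel_force_quirk)
		return false;	/* fall back to the regular panel backlight */

	return true;
}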
Signed-off-by: Lyude Paul Fixes: 17f5d57915be ("drm/i915: Force DPCD backlight mode on X1 Extreme 2nd Gen 4K AMOLED panel") Cc: Adam Jackson Cc: Jani Nikula Cc: Joonas Lahtinen Cc: "Ville Syrjälä" Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200413214407.1851002-1-lyude@redhat.com --- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 4b916468540f..0722540d64ad 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -358,6 +358,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) */ if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE && + i915_modparams.enable_dpcd_backlight != 1 && !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks, DP_QUIRK_FORCE_DPCD_BACKLIGHT)) { drm_info(&i915->drm, From a95f3ac21d64d62c746f836598d1467d5837fa28 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 13:53:55 +0100 Subject: [PATCH 035/121] drm/i915/gem: Remove object_is_locked assertion from unpin_from_display_plane Since moving the obj->vma.list to a spin_lock, and the vm->bound_list to its vm->mutex, along with tracking shrinkable status under its own spinlock, we no long require the object to be locked by the caller. This is fortunate as it appears we can be called with the lock along an error path in flipping: <4> [139.942851] WARN_ON(debug_locks && !lock_is_held(&(&((obj)->base.resv)->lock.base)->dep_map)) <4> [139.943242] WARNING: CPU: 0 PID: 1203 at drivers/gpu/drm/i915/gem/i915_gem_domain.c:405 i915_gem_object_unpin_from_display_plane+0x70/0x130 [i915] <4> [139.943263] Modules linked in: snd_hda_intel i915 vgem snd_hda_codec_realtek snd_hda_codec_generic coretemp snd_intel_dspcfg snd_hda_codec snd_hwdep snd_hda_core r8169 lpc_ich snd_pcm realtek prime_numbers [last unloaded: i915] <4> [139.943347] CPU: 0 PID: 1203 Comm: kms_flip Tainted: G U 5.6.0-gd0fda5c2cf3f1-drmtip_474+ #1 <4> [139.943363] Hardware name: /D510MO, BIOS MOPNV10J.86A.0311.2010.0802.2346 08/02/2010 <4> [139.943589] RIP: 0010:i915_gem_object_unpin_from_display_plane+0x70/0x130 [i915] <4> [139.943589] Code: 85 28 01 00 00 be ff ff ff ff 48 8d 78 60 e8 d7 9b f0 e2 85 c0 75 b9 48 c7 c6 50 b9 38 c0 48 c7 c7 e9 48 3c c0 e8 20 d4 e9 e2 <0f> 0b eb a2 48 c7 c1 08 bb 38 c0 ba 0a 01 00 00 48 c7 c6 88 a3 35 <4> [139.943589] RSP: 0018:ffffb774c0603b48 EFLAGS: 00010282 <4> [139.943589] RAX: 0000000000000000 RBX: ffff9a142fa36e80 RCX: 0000000000000006 <4> [139.943589] RDX: 000000000000160d RSI: ffff9a142c1a88f8 RDI: ffffffffa434a64d <4> [139.943589] RBP: ffff9a1410a513c0 R08: ffff9a142c1a88f8 R09: 0000000000000000 <4> [139.943589] R10: 0000000000000000 R11: 0000000000000000 R12: ffff9a1436ee94b8 <4> [139.943589] R13: 0000000000000001 R14: 00000000ffffffff R15: ffff9a1410960000 <4> [139.943589] FS: 00007fc73a744e40(0000) GS:ffff9a143da00000(0000) knlGS:0000000000000000 <4> [139.943589] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [139.943589] CR2: 00007fc73997e098 CR3: 000000002f5fe000 CR4: 00000000000006f0 <4> [139.943589] Call Trace: <4> [139.943589] intel_pin_and_fence_fb_obj+0x1c9/0x1f0 [i915] <4> [139.943589] intel_plane_pin_fb+0x3f/0xd0 [i915] <4> [139.943589] intel_prepare_plane_fb+0x13b/0x5c0 [i915] <4> [139.943589] drm_atomic_helper_prepare_planes+0x85/0x110 <4> [139.943589] 
intel_atomic_commit+0xda/0x390 [i915] <4> [139.943589] drm_atomic_helper_page_flip+0x9c/0xd0 <4> [139.943589] ? drm_event_reserve_init+0x46/0x60 <4> [139.943589] drm_mode_page_flip_ioctl+0x587/0x5d0 This completes the symmetry lost in commit 8b1c78e06e61 ("drm/i915: Avoid calling i915_gem_object_unbind holding object lock"). Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1743 Fixes: 8b1c78e06e61 ("drm/i915: Avoid calling i915_gem_object_unbind holding object lock") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Andi Shyti Cc: # v5.6+ Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200420125356.26614-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index af43e82f45c7..7f76fc68f498 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -368,7 +368,6 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_vma *vma; - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); if (list_empty(&obj->vma.list)) return; @@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) { - struct drm_i915_gem_object *obj = vma->obj; - - assert_object_held(obj); - /* Bump the LRU to try and avoid premature eviction whilst flipping */ - i915_gem_object_bump_inactive_ggtt(obj); + i915_gem_object_bump_inactive_ggtt(vma->obj); i915_vma_unpin(vma); } From f153f6395a6392e894a1039eada81f8083d00517 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 13:53:56 +0100 Subject: [PATCH 036/121] drm/i915/gt: Move the late flush_submission in retire to the end Avoid flushing the submission queue (of others) under the client's timeline lock, but instead move it to the end so that we may catch more. References: https://gitlab.freedesktop.org/drm/intel/-/issues/1066 Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200420125356.26614-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 835ec184763e..dec96a731a77 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -162,7 +162,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) } } - if (!retire_requests(tl) || flush_submission(gt)) + if (!retire_requests(tl)) active_count++; mutex_unlock(&tl->mutex); @@ -185,6 +185,9 @@ out_active: spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &free, link) __intel_timeline_free(&tl->kref); + if (flush_submission(gt)) /* Wait, there's more! */ + active_count++; + return active_count ? 
timeout : 0; } From 6b7fc6a3e6af4ff5773949d0fed70d8e7f68d5ce Mon Sep 17 00:00:00 2001 From: Peter Jones Date: Fri, 6 Jul 2018 15:04:24 -0400 Subject: [PATCH 037/121] Make the "Reducing compressed framebufer size" message be DRM_INFO_ONCE() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was sort of annoying me: random:~$ dmesg | tail -1 [523884.039227] [drm] Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS. random:~$ dmesg | grep -c "Reducing the compressed" 47 This patch makes it DRM_INFO_ONCE() just like the similar message farther down in that function is pr_info_once(). Cc: stable@vger.kernel.org Signed-off-by: Peter Jones Acked-by: Rodrigo Vivi Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1745 Link: https://patchwork.freedesktop.org/patch/msgid/20180706190424.29194-1-pjones@redhat.com [vsyrjala: Rebase due to per-device logging] Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/display/intel_fbc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 56bcd6c52a02..c6afa10e814c 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -485,9 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, if (!ret) goto err_llb; else if (ret > 1) { - drm_info(&dev_priv->drm, - "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); - + drm_info_once(&dev_priv->drm, + "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); } fbc->threshold = ret; From 02a715c371d2b411687fbbb1e57020bcbc41f6c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Apr 2020 16:47:17 +0300 Subject: [PATCH 038/121] drm/i915: Pass encoder to intel_ddi_enable_pipe_clock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since intel_ddi_enable_pipe_clock() was pushed down into the encoder hooks we can pass on the encoder instead of having to use intel_ddi_get_crtc_encoder(). 
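In sketch form (types forward-declared, the real hunks follow), the refactor threads the encoder that the pre-enable hooks already receive down into the helper instead of rediscovering it from the crtc:

struct intel_encoder;
struct intel_crtc_state;

/*
 * Before: intel_ddi_enable_pipe_clock() took only the crtc state and had to
 * call intel_ddi_get_crtc_encoder(), which walks the crtc's encoder list and
 * WARNs unless exactly one encoder is found.
 *
 * After: the caller already holds the encoder and simply hands it down.
 */
void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
				 const struct intel_crtc_state *crtc_state);

static void example_pre_enable_hook(struct intel_encoder *encoder,
				    const struct intel_crtc_state *crtc_state)
{
	/* ... clock selection, panel power, etc. elided ... */
	intel_ddi_enable_pipe_clock(encoder, crtc_state);
}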
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200417134720.16654-1-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson Acked-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_crt.c | 2 +- drivers/gpu/drm/i915/display/intel_ddi.c | 10 +++++----- drivers/gpu/drm/i915/display/intel_ddi.h | 3 ++- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index a59ecbed0004..cbf8408537cf 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -294,7 +294,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state, hsw_fdi_link_train(encoder, crtc_state); - intel_ddi_enable_pipe_clock(crtc_state); + intel_ddi_enable_pipe_clock(encoder, crtc_state); } static void hsw_enable_crt(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 1aab93a94f40..2574c058da87 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1986,11 +1986,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, intel_dsc_power_domain(crtc_state)); } -void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) +void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); enum port port = encoder->port; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; @@ -3158,7 +3158,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * 7.a Configure Transcoder Clock Select to direct the Port clock to the * Transcoder. 
*/ - intel_ddi_enable_pipe_clock(crtc_state); + intel_ddi_enable_pipe_clock(encoder, crtc_state); /* * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST @@ -3296,7 +3296,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_fec(encoder, crtc_state); if (!is_mst) - intel_ddi_enable_pipe_clock(crtc_state); + intel_ddi_enable_pipe_clock(encoder, crtc_state); intel_dsc_enable(encoder, crtc_state); } @@ -3357,7 +3357,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); - intel_ddi_enable_pipe_clock(crtc_state); + intel_ddi_enable_pipe_clock(encoder, crtc_state); intel_dig_port->set_infoframes(encoder, crtc_state->has_infoframe, diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index de4cd877c002..73dad48f9356 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -27,7 +27,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); -void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); +void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 61605eb8c2af..c25e629e3728 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -491,7 +491,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, * here for the following ones. */ if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream) - intel_ddi_enable_pipe_clock(pipe_config); + intel_ddi_enable_pipe_clock(encoder, pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); From c38730987b4a3652ed2c350a2679ca228b0ab284 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Apr 2020 16:47:18 +0300 Subject: [PATCH 039/121] drm/i915: Move the TRANS_DDI_FUNC_CTL enable to a later point MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No reason that I can see why we should enable TRANS_DDI_FUNC_CTL before we set up the watermarks of configure the mbus stuff. In fact reordering these seems to match the bspec sequence better, and crucially will allow us to push the TRANS_DDI_FUNC_CTL enable into the encoder enable hook as a followup. 
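Condensed from the hunk that follows, the enable order in hsw_crtc_enable() changes like this (the !transcoder_is_dsi() guard around the transcoder function enable is kept in both versions):

/*
 *   before                                  after
 *   ------                                  -----
 *   intel_ddi_enable_transcoder_func()      initial_watermarks()
 *   initial_watermarks()                    icl_pipe_mbus_enable()
 *   icl_pipe_mbus_enable()                  intel_ddi_enable_transcoder_func()
 *   intel_encoders_enable()                 intel_encoders_enable()
 *
 * The transcoder function is now enabled right before the encoders, which is
 * what lets the next patch move the call into the encoder->enable() hook.
 */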
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200417134720.16654-2-ville.syrjala@linux.intel.com Acked-by: Chris Wilson Acked-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index bae1d89875d6..55e7c499e7c7 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7071,15 +7071,15 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (INTEL_GEN(dev_priv) >= 11) icl_set_pipe_chicken(crtc); - if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_enable_transcoder_func(new_crtc_state); - if (dev_priv->display.initial_watermarks) dev_priv->display.initial_watermarks(state, crtc); if (INTEL_GEN(dev_priv) >= 11) icl_pipe_mbus_enable(crtc); + if (!transcoder_is_dsi(cpu_transcoder)) + intel_ddi_enable_transcoder_func(new_crtc_state); + intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { From 7c2fedd7608fca45ba91200994fafe81700f582c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Apr 2020 16:47:19 +0300 Subject: [PATCH 040/121] drm/i915: Push TRANS_DDI_FUNC_CTL into the encoder->enable() hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Push the TRANS_DDI_FUNC_CTL into the encoder enable hook. The disable is already there, and as a followup will enable us to pass the encoder all the way down. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200417134720.16654-3-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson Acked-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_crt.c | 2 ++ drivers/gpu/drm/i915/display/intel_ddi.c | 2 ++ drivers/gpu/drm/i915/display/intel_display.c | 3 --- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 ++ 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index cbf8408537cf..0a75821a680b 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -308,6 +308,8 @@ static void hsw_enable_crt(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); + intel_ddi_enable_transcoder_func(crtc_state); + intel_enable_pipe(crtc_state); lpt_pch_enable(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2574c058da87..9718415ee680 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3765,6 +3765,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state, { WARN_ON(crtc_state->has_pch_encoder); + intel_ddi_enable_transcoder_func(crtc_state); + intel_enable_pipe(crtc_state); intel_crtc_vblank_on(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 55e7c499e7c7..74449f67acc2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7077,9 +7077,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (INTEL_GEN(dev_priv) >= 11) icl_pipe_mbus_enable(crtc); - if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_enable_transcoder_func(new_crtc_state); - intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { diff --git 
a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index c25e629e3728..0dcbd8e1d82b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -510,6 +510,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); + intel_ddi_enable_transcoder_func(pipe_config); + intel_enable_pipe(pipe_config); intel_crtc_vblank_on(pipe_config); From eed22a46b979a3ccefd4820a73af167569ad4711 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Apr 2020 16:47:20 +0300 Subject: [PATCH 041/121] drm/i915: Pass encoder all the way to intel_ddi_transcoder_func_reg_val_get() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the encoder all the way down to intel_ddi_transcoder_func_reg_val_get(). Allows us eliminate the intel_ddi_get_crtc_encoder() eyesore. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200417134720.16654-4-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson Acked-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_crt.c | 2 +- drivers/gpu/drm/i915/display/intel_ddi.c | 39 ++++++--------------- drivers/gpu/drm/i915/display/intel_ddi.h | 3 +- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +- 4 files changed, 14 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 0a75821a680b..2f5b9a4baafd 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -308,7 +308,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); - intel_ddi_enable_transcoder_func(crtc_state); + intel_ddi_enable_transcoder_func(encoder, crtc_state); intel_enable_pipe(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 9718415ee680..74bc489e6dd6 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1351,27 +1351,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); } -static struct intel_encoder * -intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct intel_encoder *encoder, *ret = NULL; - int num_encoders = 0; - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) { - ret = encoder; - num_encoders++; - } - - if (num_encoders != 1) - drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n", - num_encoders, - pipe_name(crtc->pipe)); - - BUG_ON(ret == NULL); - return ret; -} - static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, enum port port) { @@ -1512,10 +1491,10 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder) * intel_ddi_config_transcoder_func(). 
*/ static u32 -intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) +intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; @@ -1617,7 +1596,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) return temp; } -void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) +void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1640,7 +1620,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2); } - ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state); + ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); @@ -1651,14 +1631,15 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) * bit. */ static void -intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) +intel_ddi_config_transcoder_func(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 ctl; - ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state); + ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state); ctl &= ~TRANS_DDI_FUNC_ENABLE; intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); } @@ -3164,7 +3145,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST * Transport Select */ - intel_ddi_config_transcoder_func(crtc_state); + intel_ddi_config_transcoder_func(encoder, crtc_state); /* * 7.c Configure & enable DP_TP_CTL with link training pattern 1 @@ -3765,7 +3746,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state, { WARN_ON(crtc_state->has_pch_encoder); - intel_ddi_enable_transcoder_func(crtc_state); + intel_ddi_enable_transcoder_func(encoder, crtc_state); intel_enable_pipe(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 73dad48f9356..fbdf8ddde486 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -25,7 +25,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); -void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state); +void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); void intel_ddi_enable_pipe_clock(struct 
intel_encoder *encoder, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 0dcbd8e1d82b..74559379384a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -510,7 +510,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); - intel_ddi_enable_transcoder_func(pipe_config); + intel_ddi_enable_transcoder_func(encoder, pipe_config); intel_enable_pipe(pipe_config); From 3c0ec2c2d5948356a22c8f1c08a8087b12a303e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Apr 2020 18:27:33 +0300 Subject: [PATCH 042/121] drm/i915: Flatten intel_dp_check_mst_status() a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make intel_dp_check_mst_status() somewhat legible by humans. Note that the return value of drm_dp_mst_hpd_irq() is always either 0 or -ENOMEM, and we never did anything with the latter so we can just ignore the whole thing. We can also get rid of the direct drm_dp_mst_topology_mgr_set_mst(false) call since returning -EINVAL causes the caller to do the very same call for us. Cc: Lyude Paul Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200417152734.464-1-ville.syrjala@linux.intel.com Reviewed-by: Lyude Paul --- drivers/gpu/drm/i915/display/intel_dp.c | 88 ++++++++++++------------- 1 file changed, 41 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a71d29136595..ca2193c6f7f1 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5625,61 +5625,55 @@ static int intel_dp_check_mst_status(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - bool bret; - if (intel_dp->is_mst) { - u8 esi[DP_DPRX_ESI_LEN] = { 0 }; - int ret = 0; + if (!intel_dp->is_mst) + return -EINVAL; + + WARN_ON_ONCE(intel_dp->active_mst_links < 0); + + for (;;) { + u8 esi[DP_DPRX_ESI_LEN] = {}; + bool bret, handled; int retry; - bool handled; - WARN_ON_ONCE(intel_dp->active_mst_links < 0); bret = intel_dp_get_sink_irq_esi(intel_dp, esi); -go_again: - if (bret == true) { - - /* check link status - esi[10] = 0x200c */ - if (intel_dp->active_mst_links > 0 && - !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { - drm_dbg_kms(&i915->drm, - "channel EQ not ok, retraining\n"); - intel_dp_start_link_train(intel_dp); - intel_dp_stop_link_train(intel_dp); - } - - drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); - ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); - - if (handled) { - for (retry = 0; retry < 3; retry++) { - int wret; - wret = drm_dp_dpcd_write(&intel_dp->aux, - DP_SINK_COUNT_ESI+1, - &esi[1], 3); - if (wret == 3) { - break; - } - } - - bret = intel_dp_get_sink_irq_esi(intel_dp, esi); - if (bret == true) { - drm_dbg_kms(&i915->drm, - "got esi2 %3ph\n", esi); - goto go_again; - } - } else - ret = 0; - - return ret; - } else { + if (!bret) { drm_dbg_kms(&i915->drm, "failed to get ESI - device may have failed\n"); - intel_dp->is_mst = false; - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, - intel_dp->is_mst); + return -EINVAL; + } + + /* check link status - esi[10] = 0x200c */ + /* + * FIXME kill this and use the SST retraining approach + * for MST as well. 
+ */ + if (intel_dp->active_mst_links > 0 && + !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { + drm_dbg_kms(&i915->drm, + "channel EQ not ok, retraining\n"); + intel_dp_start_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); + + drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); + if (!handled) + break; + + for (retry = 0; retry < 3; retry++) { + int wret; + + wret = drm_dp_dpcd_write(&intel_dp->aux, + DP_SINK_COUNT_ESI+1, + &esi[1], 3); + if (wret == 3) + break; } } - return -EINVAL; + + return 0; } static bool From f0617ff0b89416d5e15e14c22489ae3013d343fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Apr 2020 18:27:34 +0300 Subject: [PATCH 043/121] drm/i915: Push MST link retraining to the hotplug work MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We shouldn't try to do link retraining from the short hpd handler. We can't take any modeset locks there so this is racy as hell. Push the whole thing into the hotplug work like we do with SST. We'll just have to adjust the SST retraining code to deal with the MST encoders and multiple pipes. TODO: I have a feeling we should just rip this all out and do a full modeset instead. Stuff like port sync and the tgl+ MST master transcoder stuff maybe doesn't work well if we try to retrain without following the proper modeset sequence. So far haven't done any actual tests to confirm that though. Cc: Lyude Paul Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200417152734.464-2-ville.syrjala@linux.intel.com Reviewed-by: Lyude Paul --- drivers/gpu/drm/i915/display/intel_dp.c | 165 ++++++++++++++++++------ 1 file changed, 122 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ca2193c6f7f1..ec1157858ac5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5625,6 +5625,7 @@ static int intel_dp_check_mst_status(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + bool need_retrain = false; if (!intel_dp->is_mst) return -EINVAL; @@ -5644,16 +5645,11 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) } /* check link status - esi[10] = 0x200c */ - /* - * FIXME kill this and use the SST retraining approach - * for MST as well. 
- */ - if (intel_dp->active_mst_links > 0 && + if (intel_dp->active_mst_links > 0 && !need_retrain && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { drm_dbg_kms(&i915->drm, "channel EQ not ok, retraining\n"); - intel_dp_start_link_train(intel_dp); - intel_dp_stop_link_train(intel_dp); + need_retrain = true; } drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); @@ -5673,7 +5669,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) } } - return 0; + return need_retrain; } static bool @@ -5710,20 +5706,102 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); } +static bool intel_dp_has_connector(struct intel_dp *intel_dp, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder; + enum pipe pipe; + + if (!conn_state->best_encoder) + return false; + + /* SST */ + encoder = &dp_to_dig_port(intel_dp)->base; + if (conn_state->best_encoder == &encoder->base) + return true; + + /* MST */ + for_each_pipe(i915, pipe) { + encoder = &intel_dp->mst_encoders[pipe]->base; + if (conn_state->best_encoder == &encoder->base) + return true; + } + + return false; +} + +static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u32 *crtc_mask) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct drm_connector_list_iter conn_iter; + struct intel_connector *connector; + int ret = 0; + + *crtc_mask = 0; + + if (!intel_dp_needs_link_retrain(intel_dp)) + return 0; + + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + struct drm_connector_state *conn_state = + connector->base.state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (!intel_dp_has_connector(intel_dp, conn_state)) + continue; + + crtc = to_intel_crtc(conn_state->crtc); + if (!crtc) + continue; + + ret = drm_modeset_lock(&crtc->base.mutex, ctx); + if (ret) + break; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); + + if (!crtc_state->hw.active) + continue; + + if (conn_state->commit && + !try_wait_for_completion(&conn_state->commit->hw_done)) + continue; + + *crtc_mask |= drm_crtc_mask(&crtc->base); + } + drm_connector_list_iter_end(&conn_iter); + + if (!intel_dp_needs_link_retrain(intel_dp)) + *crtc_mask = 0; + + return ret; +} + +static bool intel_dp_is_connected(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = intel_dp->attached_connector; + + return connector->base.status == connector_status_connected || + intel_dp->is_mst; +} + int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_connector *connector = intel_dp->attached_connector; - struct drm_connector_state *conn_state; - struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; + u32 crtc_mask; int ret; - /* FIXME handle the MST connectors as well */ - - if (!connector || connector->base.status != connector_status_connected) + if (!intel_dp_is_connected(intel_dp)) return 0; ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, @@ -5731,46 +5809,42 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, if (ret) return ret; - conn_state = connector->base.state; - - crtc = to_intel_crtc(conn_state->crtc); 
- if (!crtc) - return 0; - - ret = drm_modeset_lock(&crtc->base.mutex, ctx); + ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); if (ret) return ret; - crtc_state = to_intel_crtc_state(crtc->base.state); - - drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state)); - - if (!crtc_state->hw.active) + if (crtc_mask == 0) return 0; - if (conn_state->commit && - !try_wait_for_completion(&conn_state->commit->hw_done)) - return 0; + drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", + encoder->base.base.id, encoder->base.name); - if (!intel_dp_needs_link_retrain(intel_dp)) - return 0; + for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); - /* Suppress underruns caused by re-training */ - intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - if (crtc_state->has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev_priv, - intel_crtc_pch_transcoder(crtc), false); + /* Suppress underruns caused by re-training */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); + if (crtc_state->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, + intel_crtc_pch_transcoder(crtc), false); + } intel_dp_start_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); - /* Keep underrun reporting disabled until things are stable */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); - intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); - if (crtc_state->has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev_priv, - intel_crtc_pch_transcoder(crtc), true); + /* Keep underrun reporting disabled until things are stable */ + intel_wait_for_vblank(dev_priv, crtc->pipe); + + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); + if (crtc_state->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, + intel_crtc_pch_transcoder(crtc), true); + } return 0; } @@ -7412,7 +7486,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) } if (intel_dp->is_mst) { - if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { + switch (intel_dp_check_mst_status(intel_dp)) { + case -EINVAL: /* * If we were in MST mode, and device is not * there, get out of MST mode @@ -7426,6 +7501,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) intel_dp->is_mst); return IRQ_NONE; + case 1: + return IRQ_NONE; + default: + break; } } From 4ba74e53ada3c7cca9808d0c1332d34133f06575 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 18:27:34 +0100 Subject: [PATCH 044/121] drm/i915/selftests: Verify frequency scaling with RPS One of the core tenents of reclocking the GPU is that its throughput scales with the clock frequency. We can observe this by incrementing a loop counter on the GPU, and compare the different execution rates at the notional RPS frequencies. 
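As a worked example (userspace model with made-up numbers), this is the comparison the selftest performs further down: the counter rate measured at the max frequency point should exceed the rate at the min point roughly in proportion to the frequency ratio, checked with the same factor-of-two scaled_within() tolerance used by the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same idea as the selftest's scaled_within(): x/y must stay within (f_n/f_d, f_d/f_n). */
static bool scaled_within(uint64_t x, uint64_t y, uint32_t f_n, uint32_t f_d)
{
	return f_d * x > f_n * y && f_n * x < f_d * y;
}

int main(void)
{
	/* hypothetical measurements: spin-counter increments per second */
	uint64_t min_count = 310000, max_count = 1190000;
	/* hypothetical RPS points, in MHz */
	uint32_t min_freq = 300, max_freq = 1150;

	/* perfect scaling would make these two products equal */
	bool ok = scaled_within(max_freq * min_count,
				min_freq * max_count, 1, 2);

	printf("throughput %s with frequency\n",
	       ok ? "scales" : "does NOT scale");
	return 0;
}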
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200420172739.11620-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 3 +- drivers/gpu/drm/i915/gt/selftest_rps.c | 249 +++++++++++++++++++++-- drivers/gpu/drm/i915/gt/selftest_rps.h | 1 + 3 files changed, 240 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 0141c334f2ac..4b2733967c42 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -53,8 +53,9 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_rc6_manual), - SUBTEST(live_rps_interrupt), + SUBTEST(live_rps_frequency), SUBTEST(live_rps_power), + SUBTEST(live_rps_interrupt), SUBTEST(live_gt_resume), }; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 360f56aa4b82..b1a435db1edc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -6,6 +6,7 @@ #include #include "intel_engine_pm.h" +#include "intel_gpu_commands.h" #include "intel_gt_pm.h" #include "intel_rc6.h" #include "selftest_rps.h" @@ -17,6 +18,242 @@ static void dummy_rps_work(struct work_struct *wrk) { } +static int cmp_u64(const void *A, const void *B) +{ + const u64 *a = A, *b = B; + + if (a < b) + return -1; + else if (a > b) + return 1; + else + return 0; +} + +static struct i915_vma * +create_spin_counter(struct intel_engine_cs *engine, + struct i915_address_space *vm, + u32 **cancel, + u32 **counter) +{ + enum { + COUNT, + INC, + __NGPR__, + }; +#define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x) + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 *base, *cs; + int loop, i; + int err; + + obj = i915_gem_object_create_internal(vm->i915, 4096); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) { + i915_vma_put(vma); + return ERR_PTR(err); + } + + base = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(base)) { + i915_gem_object_put(obj); + return ERR_CAST(base); + } + cs = base; + + *cs++ = MI_LOAD_REGISTER_IMM(__NGPR__ * 2); + for (i = 0; i < __NGPR__; i++) { + *cs++ = i915_mmio_reg_offset(CS_GPR(i)); + *cs++ = 0; + *cs++ = i915_mmio_reg_offset(CS_GPR(i)) + 4; + *cs++ = 0; + } + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(CS_GPR(INC)); + *cs++ = 1; + + loop = cs - base; + + *cs++ = MI_MATH(4); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(COUNT)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(INC)); + *cs++ = MI_MATH_ADD; + *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU); + + *cs++ = MI_STORE_REGISTER_MEM_GEN8; + *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT)); + *cs++ = lower_32_bits(vma->node.start + 1000 * sizeof(*cs)); + *cs++ = upper_32_bits(vma->node.start + 1000 * sizeof(*cs)); + + *cs++ = MI_BATCH_BUFFER_START_GEN8; + *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs)); + *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs)); + + i915_gem_object_flush_map(obj); + + *cancel = base + loop; + *counter = memset32(base + 1000, 0, 1); + return vma; +} + +static u64 __measure_frequency(u32 *cntr, int duration_ms) +{ + u64 dc, dt; + + dt = ktime_get(); + dc = READ_ONCE(*cntr); + usleep_range(1000 * 
duration_ms, 2000 * duration_ms); + dc = READ_ONCE(*cntr) - dc; + dt = ktime_get() - dt; + + return div64_u64(1000 * 1000 * dc, dt); +} + +static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq) +{ + u64 x[5]; + int i; + + mutex_lock(&rps->lock); + GEM_BUG_ON(!rps->active); + intel_rps_set(rps, *freq); + mutex_unlock(&rps->lock); + + msleep(20); /* more than enough time to stabilise! */ + + for (i = 0; i < 5; i++) + x[i] = __measure_frequency(cntr, 2); + *freq = read_cagf(rps); + + /* A simple triangle filter for better result stability */ + sort(x, 5, sizeof(*x), cmp_u64, NULL); + return div_u64(x[1] + 2 * x[2] + x[3], 4); +} + +static bool scaled_within(u64 x, u64 y, u32 f_n, u32 f_d) +{ + return f_d * x > f_n * y && f_n * x < f_d * y; +} + +int live_rps_frequency(void *arg) +{ + void (*saved_work)(struct work_struct *wrk); + struct intel_gt *gt = arg; + struct intel_rps *rps = >->rps; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * The premise is that the GPU does change freqency at our behest. + * Let's check there is a correspondence between the requested + * frequency, the actual frequency, and the observed clock rate. + */ + + if (!rps->enabled || rps->max_freq <= rps->min_freq) + return 0; + + if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ + return 0; + + intel_gt_pm_wait_for_idle(gt); + saved_work = rps->work.func; + rps->work.func = dummy_rps_work; + + for_each_engine(engine, gt, id) { + struct i915_request *rq; + struct i915_vma *vma; + u32 *cancel, *cntr; + struct { + u64 count; + int freq; + } min, max; + + vma = create_spin_counter(engine, + engine->kernel_context->vm, + &cancel, &cntr); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + break; + } + + rq = intel_engine_create_kernel_request(engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_vma; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + vma->node.start, + PAGE_SIZE, 0); + i915_vma_unlock(vma); + i915_request_add(rq); + if (err) + goto err_vma; + + if (wait_for(READ_ONCE(*cntr), 10)) { + pr_err("%s: timed loop did not start\n", + engine->name); + goto err_vma; + } + + min.freq = rps->min_freq; + min.count = measure_frequency_at(rps, cntr, &min.freq); + + max.freq = rps->max_freq; + max.count = measure_frequency_at(rps, cntr, &max.freq); + + pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n", + engine->name, + min.count, intel_gpu_freq(rps, min.freq), + max.count, intel_gpu_freq(rps, max.freq), + (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count, + max.freq * min.count)); + + if (!scaled_within(max.freq * min.count, + min.freq * max.count, + 1, 2)) { + pr_err("%s: CS did not scale with frequency! 
scaled min:%llu, max:%llu\n", + engine->name, + max.freq * min.count, + min.freq * max.count); + err = -EINVAL; + } + +err_vma: + *cancel = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(vma->obj); + i915_vma_unpin(vma); + i915_vma_put(vma); + + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + break; + } + + intel_gt_pm_wait_for_idle(gt); + rps->work.func = saved_work; + + return err; +} + static void sleep_for_ei(struct intel_rps *rps, int timeout_us) { /* Flush any previous EI */ @@ -248,18 +485,6 @@ static u64 __measure_power(int duration_ms) return div64_u64(1000 * 1000 * dE, dt); } -static int cmp_u64(const void *A, const void *B) -{ - const u64 *a = A, *b = B; - - if (a < b) - return -1; - else if (a > b) - return 1; - else - return 0; -} - static u64 measure_power_at(struct intel_rps *rps, int freq) { u64 x[5]; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h index cad515a7f0e5..07c2bddf8899 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.h +++ b/drivers/gpu/drm/i915/gt/selftest_rps.h @@ -6,6 +6,7 @@ #ifndef SELFTEST_RPS_H #define SELFTEST_RPS_H +int live_rps_frequency(void *arg); int live_rps_interrupt(void *arg); int live_rps_power(void *arg); From a740f5c5f6bc781340c1fd346123b6b7d8be54de Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 18:27:35 +0100 Subject: [PATCH 045/121] drm/i915/selftests: Skip energy consumption tests if not controlling freq If we can not manipulate the frequency with RPS, then comparing min/max power consumption is pointless / misleading. We will leave the warning about not being able to control the frequency selection via RPS to other tests so as not to confuse this more specialised check. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200420172739.11620-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 37 ++++++++++++++++---------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index b1a435db1edc..149a3de86cb9 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -485,26 +485,21 @@ static u64 __measure_power(int duration_ms) return div64_u64(1000 * 1000 * dE, dt); } -static u64 measure_power_at(struct intel_rps *rps, int freq) +static u64 measure_power_at(struct intel_rps *rps, int *freq) { u64 x[5]; int i; mutex_lock(&rps->lock); GEM_BUG_ON(!rps->active); - intel_rps_set(rps, freq); + intel_rps_set(rps, *freq); mutex_unlock(&rps->lock); msleep(20); /* more than enough time to stabilise! 
*/ - i = read_cagf(rps); - if (i != freq) - pr_notice("Running at %x [%uMHz], not target %x [%uMHz]\n", - i, intel_gpu_freq(rps, i), - freq, intel_gpu_freq(rps, freq)); - for (i = 0; i < 5; i++) x[i] = __measure_power(5); + *freq = read_cagf(rps); /* A simple triangle filter for better result stability */ sort(x, 5, sizeof(*x), cmp_u64, NULL); @@ -542,7 +537,10 @@ int live_rps_power(void *arg) for_each_engine(engine, gt, id) { struct i915_request *rq; - u64 min, max; + struct { + u64 power; + int freq; + } min, max; if (!intel_engine_can_store_dword(engine)) continue; @@ -565,16 +563,27 @@ int live_rps_power(void *arg) break; } - max = measure_power_at(rps, rps->max_freq); - min = measure_power_at(rps, rps->min_freq); + max.freq = rps->max_freq; + max.power = measure_power_at(rps, &max.freq); + + min.freq = rps->min_freq; + min.power = measure_power_at(rps, &min.freq); igt_spinner_end(&spin); pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n", engine->name, - min, intel_gpu_freq(rps, rps->min_freq), - max, intel_gpu_freq(rps, rps->max_freq)); - if (11 * min > 10 * max) { + min.power, intel_gpu_freq(rps, min.freq), + max.power, intel_gpu_freq(rps, max.freq)); + + if (10 * min.freq >= 9 * max.freq) { + pr_notice("Could not control frequency, ran at [%d:%uMHz, %d:%uMhz]\n", + min.freq, intel_gpu_freq(rps, min.freq), + max.freq, intel_gpu_freq(rps, max.freq)); + continue; + } + + if (11 * min.power > 10 * max.power) { pr_err("%s: did not conserve power when setting lower frequency!\n", engine->name); err = -EINVAL; From 9938ee2e631c329ff335884c51ff78ebe5224722 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 18:27:36 +0100 Subject: [PATCH 046/121] drm/i915/selftests: Check RPS controls Check that the GPU does respond to our RPS frequency requests by setting our desired frequency. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200420172739.11620-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 1 + drivers/gpu/drm/i915/gt/selftest_rps.c | 212 ++++++++++++++++++++--- drivers/gpu/drm/i915/gt/selftest_rps.h | 1 + drivers/gpu/drm/i915/i915_reg.h | 1 + 4 files changed, 191 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 4b2733967c42..de3eaef40596 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -53,6 +53,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_rc6_manual), + SUBTEST(live_rps_control), SUBTEST(live_rps_frequency), SUBTEST(live_rps_power), SUBTEST(live_rps_interrupt), diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 149a3de86cb9..63f5f06347c8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -107,6 +107,188 @@ create_spin_counter(struct intel_engine_cs *engine, return vma; } +static u8 rps_set_check(struct intel_rps *rps, u8 freq) +{ + u8 history[64], i; + unsigned long end; + int sleep; + + mutex_lock(&rps->lock); + GEM_BUG_ON(!rps->active); + intel_rps_set(rps, freq); + GEM_BUG_ON(rps->last_freq != freq); + mutex_unlock(&rps->lock); + + i = 0; + memset(history, freq, sizeof(history)); + sleep = 20; + + /* The PCU does not change instantly, but drifts towards the goal? 
*/ + end = jiffies + msecs_to_jiffies(50); + do { + u8 act; + + act = read_cagf(rps); + if (time_after(jiffies, end)) + return act; + + /* Target acquired */ + if (act == freq) + return act; + + /* Any change within the last N samples? */ + if (!memchr_inv(history, act, sizeof(history))) + return act; + + history[i] = act; + i = (i + 1) % ARRAY_SIZE(history); + + usleep_range(sleep, 2 * sleep); + sleep *= 2; + if (sleep > 1000) + sleep = 1000; + } while (1); +} + +static void show_pstate_limits(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (IS_BROXTON(i915)) { + pr_info("P_STATE_CAP[%x]: 0x%08x\n", + i915_mmio_reg_offset(BXT_RP_STATE_CAP), + intel_uncore_read(rps_to_uncore(rps), + BXT_RP_STATE_CAP)); + } else if (IS_GEN(i915, 9)) { + pr_info("P_STATE_LIMITS[%x]: 0x%08x\n", + i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS), + intel_uncore_read(rps_to_uncore(rps), + GEN9_RP_STATE_LIMITS)); + } +} + +int live_rps_control(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_rps *rps = >->rps; + void (*saved_work)(struct work_struct *wrk); + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + /* + * Check that the actual frequency matches our requested frequency, + * to verify our control mechanism. We have to be careful that the + * PCU may throttle the GPU in which case the actual frequency used + * will be lowered than requested. + */ + + if (!rps->enabled || rps->max_freq <= rps->min_freq) + return 0; + + if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */ + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + intel_gt_pm_wait_for_idle(gt); + saved_work = rps->work.func; + rps->work.func = dummy_rps_work; + + intel_gt_pm_get(gt); + for_each_engine(engine, gt, id) { + struct i915_request *rq; + ktime_t min_dt, max_dt; + int f, limit; + int min, max; + + if (!intel_engine_can_store_dword(engine)) + continue; + + rq = igt_spinner_create_request(&spin, + engine->kernel_context, + MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + pr_err("%s: RPS spinner did not start\n", + engine->name); + igt_spinner_end(&spin); + intel_gt_set_wedged(engine->gt); + err = -EIO; + break; + } + + if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { + pr_err("%s: could not set minimum frequency [%x], only %x!\n", + engine->name, rps->min_freq, read_cagf(rps)); + igt_spinner_end(&spin); + err = -EINVAL; + break; + } + + for (f = rps->min_freq + 1; f < rps->max_freq; f++) { + if (rps_set_check(rps, f) < f) + break; + } + + limit = rps_set_check(rps, f); + + if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { + pr_err("%s: could not restore minimum frequency [%x], only %x!\n", + engine->name, rps->min_freq, read_cagf(rps)); + igt_spinner_end(&spin); + show_pstate_limits(rps); + err = -EINVAL; + break; + } + + max_dt = ktime_get(); + max = rps_set_check(rps, limit); + max_dt = ktime_sub(ktime_get(), max_dt); + + min_dt = ktime_get(); + min = rps_set_check(rps, rps->min_freq); + min_dt = ktime_sub(ktime_get(), min_dt); + + igt_spinner_end(&spin); + + pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n", + engine->name, + rps->min_freq, intel_gpu_freq(rps, rps->min_freq), + rps->max_freq, intel_gpu_freq(rps, rps->max_freq), + limit, intel_gpu_freq(rps, limit), + min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt)); + + if (limit == rps->min_freq) { + pr_err("%s: GPU throttled to 
minimum!\n", + engine->name); + err = -ENODEV; + break; + } + + if (igt_flush_test(gt->i915)) { + err = -EIO; + break; + } + } + intel_gt_pm_put(gt); + + igt_spinner_fini(&spin); + + intel_gt_pm_wait_for_idle(gt); + rps->work.func = saved_work; + + return err; +} + static u64 __measure_frequency(u32 *cntr, int duration_ms) { u64 dc, dt; @@ -125,16 +307,10 @@ static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq) u64 x[5]; int i; - mutex_lock(&rps->lock); - GEM_BUG_ON(!rps->active); - intel_rps_set(rps, *freq); - mutex_unlock(&rps->lock); - - msleep(20); /* more than enough time to stabilise! */ - + *freq = rps_set_check(rps, *freq); for (i = 0; i < 5; i++) x[i] = __measure_frequency(cntr, 2); - *freq = read_cagf(rps); + *freq = (*freq + read_cagf(rps)) / 2; /* A simple triangle filter for better result stability */ sort(x, 5, sizeof(*x), cmp_u64, NULL); @@ -279,10 +455,7 @@ static int __rps_up_interrupt(struct intel_rps *rps, if (!intel_engine_can_store_dword(engine)) return 0; - mutex_lock(&rps->lock); - GEM_BUG_ON(!rps->active); - intel_rps_set(rps, rps->min_freq); - mutex_unlock(&rps->lock); + rps_set_check(rps, rps->min_freq); rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP); if (IS_ERR(rq)) @@ -354,10 +527,7 @@ static int __rps_down_interrupt(struct intel_rps *rps, struct intel_uncore *uncore = engine->uncore; u32 timeout; - mutex_lock(&rps->lock); - GEM_BUG_ON(!rps->active); - intel_rps_set(rps, rps->max_freq); - mutex_unlock(&rps->lock); + rps_set_check(rps, rps->max_freq); if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) { pr_err("%s: RPS did not register DOWN interrupt\n", @@ -490,16 +660,10 @@ static u64 measure_power_at(struct intel_rps *rps, int *freq) u64 x[5]; int i; - mutex_lock(&rps->lock); - GEM_BUG_ON(!rps->active); - intel_rps_set(rps, *freq); - mutex_unlock(&rps->lock); - - msleep(20); /* more than enough time to stabilise! */ - + *freq = rps_set_check(rps, *freq); for (i = 0; i < 5; i++) x[i] = __measure_power(5); - *freq = read_cagf(rps); + *freq = (*freq + read_cagf(rps)) / 2; /* A simple triangle filter for better result stability */ sort(x, 5, sizeof(*x), cmp_u64, NULL); diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h index 07c2bddf8899..be0bf8e3f639 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.h +++ b/drivers/gpu/drm/i915/gt/selftest_rps.h @@ -6,6 +6,7 @@ #ifndef SELFTEST_RPS_H #define SELFTEST_RPS_H +int live_rps_control(void *arg); int live_rps_frequency(void *arg); int live_rps_interrupt(void *arg); int live_rps_power(void *arg); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 69332be5ac41..4a1965467374 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4013,6 +4013,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994) #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) #define BXT_RP_STATE_CAP _MMIO(0x138170) +#define GEN9_RP_STATE_LIMITS _MMIO(0x138148) /* * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS From 0eaccc4b180c0ded33e0b71491aac09fcb323dac Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 18:27:37 +0100 Subject: [PATCH 047/121] drm/i915/selftests: Split RPS frequency measurement Split the frequency measurement into two modes, so that we can judge the impact of the llc setup on top of the pure CS frequency scaling. 
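In other words, the SRM variant has the batch additionally store the GPR to the WC-mapped counter page with MI_STORE_REGISTER_MEM, so the CPU reads the count back through memory, while the CS variant samples the GPR directly over MMIO; comparing the two separates the LLC/memory path from pure CS frequency scaling. A rough sketch of the difference, written as a hypothetical helper rather than anything in the patch (it assumes the CS_GPR() macro and the counter page from create_spin_counter() are in scope):

  /*
   * Hypothetical: take one counter sample in either mode.
   *  - srm == true:  the batch wrote the GPR to the WC-mapped page, read it.
   *  - srm == false: no memory write was emitted, sample the GPR via MMIO.
   */
  static u64 sample_counter(struct intel_engine_cs *engine, u32 *cntr, bool srm)
  {
      if (srm)
          return READ_ONCE(*cntr);

      return intel_uncore_read_fw(engine->uncore, CS_GPR(0));
  }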
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200420172739.11620-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 3 +- drivers/gpu/drm/i915/gt/selftest_rps.c | 157 ++++++++++++++++++++++- drivers/gpu/drm/i915/gt/selftest_rps.h | 3 +- 3 files changed, 154 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index de3eaef40596..9855e6f0ce7c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -54,7 +54,8 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) static const struct i915_subtest tests[] = { SUBTEST(live_rc6_manual), SUBTEST(live_rps_control), - SUBTEST(live_rps_frequency), + SUBTEST(live_rps_frequency_cs), + SUBTEST(live_rps_frequency_srm), SUBTEST(live_rps_power), SUBTEST(live_rps_interrupt), SUBTEST(live_gt_resume), diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 63f5f06347c8..81d7772763bc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -33,6 +33,7 @@ static int cmp_u64(const void *A, const void *B) static struct i915_vma * create_spin_counter(struct intel_engine_cs *engine, struct i915_address_space *vm, + bool srm, u32 **cancel, u32 **counter) { @@ -91,10 +92,12 @@ create_spin_counter(struct intel_engine_cs *engine, *cs++ = MI_MATH_ADD; *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU); - *cs++ = MI_STORE_REGISTER_MEM_GEN8; - *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT)); - *cs++ = lower_32_bits(vma->node.start + 1000 * sizeof(*cs)); - *cs++ = upper_32_bits(vma->node.start + 1000 * sizeof(*cs)); + if (srm) { + *cs++ = MI_STORE_REGISTER_MEM_GEN8; + *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT)); + *cs++ = lower_32_bits(vma->node.start + 1000 * sizeof(*cs)); + *cs++ = upper_32_bits(vma->node.start + 1000 * sizeof(*cs)); + } *cs++ = MI_BATCH_BUFFER_START_GEN8; *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs)); @@ -103,7 +106,7 @@ create_spin_counter(struct intel_engine_cs *engine, i915_gem_object_flush_map(obj); *cancel = base + loop; - *counter = memset32(base + 1000, 0, 1); + *counter = srm ? 
memset32(base + 1000, 0, 1) : NULL; return vma; } @@ -317,12 +320,43 @@ static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq) return div_u64(x[1] + 2 * x[2] + x[3], 4); } +static u64 __measure_cs_frequency(struct intel_engine_cs *engine, + int duration_ms) +{ + u64 dc, dt; + + dt = ktime_get(); + dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)); + usleep_range(1000 * duration_ms, 2000 * duration_ms); + dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc; + dt = ktime_get() - dt; + + return div64_u64(1000 * 1000 * dc, dt); +} + +static u64 measure_cs_frequency_at(struct intel_rps *rps, + struct intel_engine_cs *engine, + int *freq) +{ + u64 x[5]; + int i; + + *freq = rps_set_check(rps, *freq); + for (i = 0; i < 5; i++) + x[i] = __measure_cs_frequency(engine, 2); + *freq = (*freq + read_cagf(rps)) / 2; + + /* A simple triangle filter for better result stability */ + sort(x, 5, sizeof(*x), cmp_u64, NULL); + return div_u64(x[1] + 2 * x[2] + x[3], 4); +} + static bool scaled_within(u64 x, u64 y, u32 f_n, u32 f_d) { return f_d * x > f_n * y && f_n * x < f_d * y; } -int live_rps_frequency(void *arg) +int live_rps_frequency_cs(void *arg) { void (*saved_work)(struct work_struct *wrk); struct intel_gt *gt = arg; @@ -357,7 +391,116 @@ int live_rps_frequency(void *arg) } min, max; vma = create_spin_counter(engine, - engine->kernel_context->vm, + engine->kernel_context->vm, false, + &cancel, &cntr); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + break; + } + + rq = intel_engine_create_kernel_request(engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_vma; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + vma->node.start, + PAGE_SIZE, 0); + i915_vma_unlock(vma); + i915_request_add(rq); + if (err) + goto err_vma; + + if (wait_for(intel_uncore_read(engine->uncore, CS_GPR(0)), + 10)) { + pr_err("%s: timed loop did not start\n", + engine->name); + goto err_vma; + } + + min.freq = rps->min_freq; + min.count = measure_cs_frequency_at(rps, engine, &min.freq); + + max.freq = rps->max_freq; + max.count = measure_cs_frequency_at(rps, engine, &max.freq); + + pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n", + engine->name, + min.count, intel_gpu_freq(rps, min.freq), + max.count, intel_gpu_freq(rps, max.freq), + (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count, + max.freq * min.count)); + + if (!scaled_within(max.freq * min.count, + min.freq * max.count, + 2, 3)) { + pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n", + engine->name, + max.freq * min.count, + min.freq * max.count); + err = -EINVAL; + } + +err_vma: + *cancel = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(vma->obj); + i915_vma_unpin(vma); + i915_vma_put(vma); + + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + break; + } + + intel_gt_pm_wait_for_idle(gt); + rps->work.func = saved_work; + + return err; +} + +int live_rps_frequency_srm(void *arg) +{ + void (*saved_work)(struct work_struct *wrk); + struct intel_gt *gt = arg; + struct intel_rps *rps = >->rps; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * The premise is that the GPU does change freqency at our behest. + * Let's check there is a correspondence between the requested + * frequency, the actual frequency, and the observed clock rate. 
+ */ + + if (!rps->enabled || rps->max_freq <= rps->min_freq) + return 0; + + if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ + return 0; + + intel_gt_pm_wait_for_idle(gt); + saved_work = rps->work.func; + rps->work.func = dummy_rps_work; + + for_each_engine(engine, gt, id) { + struct i915_request *rq; + struct i915_vma *vma; + u32 *cancel, *cntr; + struct { + u64 count; + int freq; + } min, max; + + vma = create_spin_counter(engine, + engine->kernel_context->vm, true, &cancel, &cntr); if (IS_ERR(vma)) { err = PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h index be0bf8e3f639..22e46c5341c5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.h +++ b/drivers/gpu/drm/i915/gt/selftest_rps.h @@ -7,7 +7,8 @@ #define SELFTEST_RPS_H int live_rps_control(void *arg); -int live_rps_frequency(void *arg); +int live_rps_frequency_cs(void *arg); +int live_rps_frequency_srm(void *arg); int live_rps_interrupt(void *arg); int live_rps_power(void *arg); From 6b36fc9442bbbb8d44a4d06315f189f88381809b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 18:27:38 +0100 Subject: [PATCH 048/121] drm/i915/selftests: Show the pcode frequency table on error If we encounter an error while scaling, read back the frequency tables from the pcu. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200420172739.11620-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 39 ++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 81d7772763bc..3f5b8cf928cc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -292,6 +292,43 @@ int live_rps_control(void *arg) return err; } +static void show_pcu_config(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + unsigned int max_gpu_freq, min_gpu_freq; + intel_wakeref_t wakeref; + int gpu_freq; + + if (!HAS_LLC(i915)) + return; + + min_gpu_freq = rps->min_freq; + max_gpu_freq = rps->max_freq; + if (INTEL_GEN(i915) >= 9) { + /* Convert GT frequency to 50 HZ units */ + min_gpu_freq /= GEN9_FREQ_SCALER; + max_gpu_freq /= GEN9_FREQ_SCALER; + } + + wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm); + + pr_info("%5s %5s %5s\n", "GPU", "eCPU", "eRing"); + for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { + int ia_freq = gpu_freq; + + sandybridge_pcode_read(i915, + GEN6_PCODE_READ_MIN_FREQ_TABLE, + &ia_freq, NULL); + + pr_info("%5d %5d %5d\n", + gpu_freq * 50, + ((ia_freq >> 0) & 0xff) * 100, + ((ia_freq >> 8) & 0xff) * 100); + } + + intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref); +} + static u64 __measure_frequency(u32 *cntr, int duration_ms) { u64 dc, dt; @@ -444,6 +481,7 @@ int live_rps_frequency_cs(void *arg) engine->name, max.freq * min.count, min.freq * max.count); + show_pcu_config(rps); err = -EINVAL; } @@ -552,6 +590,7 @@ int live_rps_frequency_srm(void *arg) engine->name, max.freq * min.count, min.freq * max.count); + show_pcu_config(rps); err = -EINVAL; } From e42a969e725c2ff93a0a66ca5d2c27d59310056e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 18:27:39 +0100 Subject: [PATCH 049/121] drm/i915/selftests: Exercise dynamic reclocking with RPS After having testing all the RPS controls individually, we need to take a step back and check how our RPS worker integrates them to perform dynamic GPU reclocking. 
So do that by submitting a spinner and wait and see what happens. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200420172739.11620-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 1 + drivers/gpu/drm/i915/gt/selftest_rps.c | 112 +++++++++++++++++++++-- drivers/gpu/drm/i915/gt/selftest_rps.h | 3 +- 3 files changed, 105 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 9855e6f0ce7c..e02fdec58826 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -58,6 +58,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) SUBTEST(live_rps_frequency_srm), SUBTEST(live_rps_power), SUBTEST(live_rps_interrupt), + SUBTEST(live_rps_dynamic), SUBTEST(live_gt_resume), }; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 3f5b8cf928cc..d7cd673550ef 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -110,24 +110,18 @@ create_spin_counter(struct intel_engine_cs *engine, return vma; } -static u8 rps_set_check(struct intel_rps *rps, u8 freq) +static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms) { u8 history[64], i; unsigned long end; int sleep; - mutex_lock(&rps->lock); - GEM_BUG_ON(!rps->active); - intel_rps_set(rps, freq); - GEM_BUG_ON(rps->last_freq != freq); - mutex_unlock(&rps->lock); - i = 0; memset(history, freq, sizeof(history)); sleep = 20; /* The PCU does not change instantly, but drifts towards the goal? */ - end = jiffies + msecs_to_jiffies(50); + end = jiffies + msecs_to_jiffies(timeout_ms); do { u8 act; @@ -148,11 +142,22 @@ static u8 rps_set_check(struct intel_rps *rps, u8 freq) usleep_range(sleep, 2 * sleep); sleep *= 2; - if (sleep > 1000) - sleep = 1000; + if (sleep > timeout_ms * 20) + sleep = timeout_ms * 20; } while (1); } +static u8 rps_set_check(struct intel_rps *rps, u8 freq) +{ + mutex_lock(&rps->lock); + GEM_BUG_ON(!rps->active); + intel_rps_set(rps, freq); + GEM_BUG_ON(rps->last_freq != freq); + mutex_unlock(&rps->lock); + + return wait_for_freq(rps, freq, 50); +} + static void show_pstate_limits(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); @@ -949,3 +954,90 @@ int live_rps_power(void *arg) return err; } + +int live_rps_dynamic(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_rps *rps = >->rps; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + /* + * We've looked at the bascs, and have established that we + * can change the clock frequency and that the HW will generate + * interrupts based on load. Now we check how we integrate those + * moving parts into dynamic reclocking based on load. 
+ */ + + if (!rps->enabled || rps->max_freq <= rps->min_freq) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + for_each_engine(engine, gt, id) { + struct i915_request *rq; + struct { + ktime_t dt; + u8 freq; + } min, max; + + if (!intel_engine_can_store_dword(engine)) + continue; + + intel_gt_pm_wait_for_idle(gt); + GEM_BUG_ON(rps->active); + rps->cur_freq = rps->min_freq; + + intel_engine_pm_get(engine); + intel_rc6_disable(>->rc6); + GEM_BUG_ON(rps->last_freq != rps->min_freq); + + rq = igt_spinner_create_request(&spin, + engine->kernel_context, + MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err; + } + + i915_request_add(rq); + + max.dt = ktime_get(); + max.freq = wait_for_freq(rps, rps->max_freq, 500); + max.dt = ktime_sub(ktime_get(), max.dt); + + igt_spinner_end(&spin); + + min.dt = ktime_get(); + min.freq = wait_for_freq(rps, rps->min_freq, 2000); + min.dt = ktime_sub(ktime_get(), min.dt); + + pr_info("%s: dynamically reclocked to %u:%uMHz while busy in %lluns, and %u:%uMHz while idle in %lluns\n", + engine->name, + max.freq, intel_gpu_freq(rps, max.freq), + ktime_to_ns(max.dt), + min.freq, intel_gpu_freq(rps, min.freq), + ktime_to_ns(min.dt)); + if (min.freq >= max.freq) { + pr_err("%s: dynamic reclocking of spinner failed\n!", + engine->name); + err = -EINVAL; + } + +err: + intel_rc6_enable(>->rc6); + intel_engine_pm_put(engine); + + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + break; + } + + igt_spinner_fini(&spin); + + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h index 22e46c5341c5..76c4b19553e6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.h +++ b/drivers/gpu/drm/i915/gt/selftest_rps.h @@ -9,7 +9,8 @@ int live_rps_control(void *arg); int live_rps_frequency_cs(void *arg); int live_rps_frequency_srm(void *arg); -int live_rps_interrupt(void *arg); int live_rps_power(void *arg); +int live_rps_interrupt(void *arg); +int live_rps_dynamic(void *arg); #endif /* SELFTEST_RPS_H */ From e07c7606a00c4361bad72ff4e72ed0dfbefa23b0 Mon Sep 17 00:00:00 2001 From: Xiyu Yang Date: Mon, 20 Apr 2020 13:41:54 +0800 Subject: [PATCH 050/121] drm/i915/selftests: Fix i915_address_space refcnt leak igt_ppgtt_pin_update() invokes i915_gem_context_get_vm_rcu(), which returns a reference of the i915_address_space object to "vm" with increased refcount. When igt_ppgtt_pin_update() returns, "vm" becomes invalid, so the refcount should be decreased to keep refcount balanced. The reference counting issue happens in two exception handling paths of igt_ppgtt_pin_update(). When i915_gem_object_create_internal() returns IS_ERR, the refcnt increased by i915_gem_context_get_vm_rcu() is not decreased, causing a refcnt leak. Fix this issue by jumping to "out_vm" label when i915_gem_object_create_internal() returns IS_ERR. 
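A condensed sketch of the corrected pattern (a skeleton for illustration only — the real function is igt_ppgtt_pin_update() and the body between the two points is elided; it assumes, as the commit message implies, that the out_vm label drops the reference with i915_vm_put()): every early exit taken after i915_gem_context_get_vm_rcu() must funnel through out_vm so the vm reference is released.

  static int pin_update_error_path_sketch(struct i915_gem_context *ctx,
                                          struct drm_i915_private *dev_priv)
  {
      struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
      struct drm_i915_gem_object *obj;
      int err = 0;

      obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
      if (IS_ERR(obj)) {
          err = PTR_ERR(obj);   /* was "return PTR_ERR(obj);", which leaked vm */
          goto out_vm;
      }

      /* ... exercise the object as the selftest does ... */
      i915_gem_object_put(obj);

  out_vm:
      i915_vm_put(vm);          /* balances i915_gem_context_get_vm_rcu() */
      return err;
  }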
Fixes: a4e7ccdac38e ("drm/i915: Move context management under GEM") Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/1587361342-83494-1-git-send-email-xiyuyang19@fudan.edu.cn --- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 2d0fd50c5312..d4f94ca9ae0d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg) unsigned int page_size = BIT(first); obj = i915_gem_object_create_internal(dev_priv, page_size); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_vm; + } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { @@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg) } obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_vm; + } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { From 89e01caac641864c2453d41c81a990160a9acaf9 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Wed, 15 Apr 2020 16:34:35 -0700 Subject: [PATCH 051/121] drm/i915: Use single set of AUX powerwell ops for gen11+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AUX power wells sometimes need additional handling besides just programming the specific power well registers: * Type-C PHY's also require additional Type-C register programming * ICL combo PHY's require additional workarounds * TGL & EHL combo PHY's can be treated like any other power well Today we have dedicated aux ops for the ICL combo PHY and Type-C cases. This works fine, but means that when a new platform shows up with identical general power well handling, but different types of PHYs on its outputs, we have to define an entire new power well table for that platform and can't just re-use the table from the earlier platform -- as an example, see ehl_power_wells[], which is a subset of icl_power_wells[], *except* that we need to specify different AUX ops for the third display. If we instead create a single set of top-level aux ops that will check the PHY type and then dispatch to the appropriate handlers, we can get more reuse out of our power well definitions. This allows us to immediately eliminate ehl_power_wells[] and simply reuse the ICL table; if future platforms follow the same general power well assignments as either ICL or TGL, we'll be able to re-use those tables in the same way. Note that I've only changed ICL+ platforms over to using the new icl_aux ops; at this point it's unlikely that we'll have any new platforms that re-use gen9 or earlier power well configurations. v2: - ICL_AUX_PW_TO_PHY() won't return the proper PHY for TBT AUX power wells. But we know those wells will only used on Type-C outputs anyway, so we can just check is is_tc_tbt flag in the condition. (Jose). 
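For reference, the net effect of the dispatch in the new icl_aux_power_well_enable()/_disable() ops below works out roughly to:

  PHY type on the AUX well    IS_ICELAKE                     otherwise (TGL/EHL)
  Type-C (or TBT well)        icl_tc_phy_aux_* handlers      icl_tc_phy_aux_* handlers
  combo                       icl_combo_phy_aux_* (w/a's)    hsw_power_well_* (generic)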
Cc: José Roberto de Souza Signed-off-by: Matt Roper Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200415233435.3064257-2-matthew.d.roper@intel.com --- .../drm/i915/display/intel_display_power.c | 244 +++++------------- 1 file changed, 62 insertions(+), 182 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index fa46ea6c6e01..2293de009f56 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -676,6 +676,40 @@ icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, hsw_power_well_disable(dev_priv, power_well); } +static void +icl_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->hsw.idx; + enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */ + bool is_tbt = power_well->desc->hsw.is_tc_tbt; + + if (is_tbt || intel_phy_is_tc(dev_priv, phy)) + return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); + else if (IS_ICELAKE(dev_priv)) + return icl_combo_phy_aux_power_well_enable(dev_priv, + power_well); + else + return hsw_power_well_enable(dev_priv, power_well); +} + +static void +icl_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->hsw.idx; + enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */ + bool is_tbt = power_well->desc->hsw.is_tc_tbt; + + if (is_tbt || intel_phy_is_tc(dev_priv, phy)) + return icl_tc_phy_aux_power_well_disable(dev_priv, power_well); + else if (IS_ICELAKE(dev_priv)) + return icl_combo_phy_aux_power_well_disable(dev_priv, + power_well); + else + return hsw_power_well_disable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -3601,17 +3635,10 @@ static const struct i915_power_well_desc cnl_power_wells[] = { }, }; -static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { +static const struct i915_power_well_ops icl_aux_power_well_ops = { .sync_hw = hsw_power_well_sync_hw, - .enable = icl_combo_phy_aux_power_well_enable, - .disable = icl_combo_phy_aux_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { - .sync_hw = hsw_power_well_sync_hw, - .enable = icl_tc_phy_aux_power_well_enable, - .disable = icl_tc_phy_aux_power_well_disable, + .enable = icl_aux_power_well_enable, + .disable = icl_aux_power_well_disable, .is_enabled = hsw_power_well_enabled, }; @@ -3741,7 +3768,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3751,7 +3778,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX B", .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3761,7 +3788,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX C TC1", .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, 
{ .hsw.regs = &icl_aux_power_well_regs, @@ -3772,7 +3799,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX D TC2", .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3783,7 +3810,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX E TC3", .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3794,7 +3821,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX F TC4", .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3805,7 +3832,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX C TBT1", .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3816,7 +3843,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX D TBT2", .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3827,7 +3854,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX E TBT3", .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3838,7 +3865,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX F TBT4", .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3860,151 +3887,6 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; -static const struct i915_power_well_desc ehl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = ICL_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = ICL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO", - .domains = 
ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO", - .domains = ICL_DDI_IO_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_D, - }, - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - }, - }, - { - .name = "AUX D", - .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_D, - }, - }, - { - .name = "power well 4", - .domains = ICL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - }, - }, -}; - static void tgl_tc_cold_request(struct drm_i915_private *i915, bool block) { @@ -4234,7 +4116,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX A", .domains = TGL_AUX_A_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4244,7 +4126,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX B", .domains = TGL_AUX_B_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4254,7 +4136,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX C", .domains = TGL_AUX_C_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4264,7 +4146,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX D TC1", .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4275,7 +4157,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX E TC2", .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4286,7 +4168,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX F TC3", .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4297,7 +4179,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name 
= "AUX G TC4", .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4308,7 +4190,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX H TC5", .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4319,7 +4201,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX I TC6", .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4330,7 +4212,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX D TBT1", .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4341,7 +4223,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX E TBT2", .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4352,7 +4234,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX F TBT3", .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4363,7 +4245,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX G TBT4", .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4374,7 +4256,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX H TBT5", .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4385,7 +4267,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX I TBT6", .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, + .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4572,8 +4454,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ if (IS_GEN(dev_priv, 12)) { err = set_power_wells(power_domains, tgl_power_wells); - } else if (IS_ELKHARTLAKE(dev_priv)) { - err = set_power_wells(power_domains, ehl_power_wells); } else if (IS_GEN(dev_priv, 11)) { err = set_power_wells(power_domains, icl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { From 3c3041149c76a282e93433b4b8fdd3d3d01bc709 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 20 Apr 2020 17:04:37 +0300 Subject: [PATCH 052/121] drm/i915/hdmi: remove unused intel_hdmi_hdcp2_protocol() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unused, hiding from the compiler warnings behind the inline keyword. 
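For anyone wondering why the compiler never complained: with gcc, -Wunused-function only reports unused non-inline static functions, so marking a static function inline hides the fact that it has no callers. A minimal illustration, unrelated to the driver code:

  /* Build with: gcc -Wall -Wunused-function -c example.c */
  static int unused_plain(void)          /* warning: defined but not used */
  {
      return 0;
  }

  static inline int unused_inline(void)  /* no warning: inline suppresses it */
  {
      return 0;
  }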
Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200420140438.14672-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_hdmi.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 9c058f7aa185..54412f79f0c5 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1750,12 +1750,6 @@ int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port, return ret; } -static inline -enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void) -{ - return HDCP_PROTOCOL_HDMI; -} - static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { .write_an_aksv = intel_hdmi_hdcp_write_an_aksv, .read_bksv = intel_hdmi_hdcp_read_bksv, From 81b55ef1f47bfd3d0f8220d4f13ddf1dc2c66b09 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 20 Apr 2020 17:04:38 +0300 Subject: [PATCH 053/121] drm/i915: drop a bunch of superfluous inlines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove a number of inlines from .c files, and let the compiler decide what's best. There's more to do, but need to start somewhere, and need to start setting the example. Acked-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200420140438.14672-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 8 +++---- drivers/gpu/drm/i915/display/intel_ddi.c | 7 +++--- drivers/gpu/drm/i915/display/intel_display.c | 10 ++++---- .../drm/i915/display/intel_display_power.c | 5 ++-- drivers/gpu/drm/i915/display/intel_dp.c | 6 ++--- drivers/gpu/drm/i915/display/intel_dsb.c | 6 ++--- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 2 +- drivers/gpu/drm/i915/display/intel_gmbus.c | 3 +-- drivers/gpu/drm/i915/display/intel_hdcp.c | 23 ++++++++----------- drivers/gpu/drm/i915/display/intel_hdmi.c | 8 +++---- drivers/gpu/drm/i915/display/intel_panel.c | 14 +++++------ drivers/gpu/drm/i915/intel_pm.c | 7 +++--- drivers/gpu/drm/i915/intel_sideband.c | 4 ++-- drivers/gpu/drm/i915/intel_wopcm.c | 22 +++++++++--------- 14 files changed, 58 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 99a25c0bb08f..a8d2daa214e6 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -36,15 +36,15 @@ #include "intel_panel.h" #include "intel_vdsc.h" -static inline int header_credits_available(struct drm_i915_private *dev_priv, - enum transcoder dsi_trans) +static int header_credits_available(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) { return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) >> FREE_HEADER_CREDIT_SHIFT; } -static inline int payload_credits_available(struct drm_i915_private *dev_priv, - enum transcoder dsi_trans) +static int payload_credits_available(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) { return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) >> FREE_PLOAD_CREDIT_SHIFT; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 74bc489e6dd6..e6e963f7fdf9 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1908,7 +1908,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, return true; } -static inline enum 
intel_display_power_domain +static enum intel_display_power_domain intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port) { /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with @@ -2693,9 +2693,8 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp) return DDI_BUF_TRANS_SELECT(level); } -static inline -u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, - enum phy phy) +static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, + enum phy phy) { if (intel_phy_is_combo(dev_priv, phy)) { return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 74449f67acc2..3cfedeaf0846 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -238,9 +238,9 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv) dev_priv->czclk_freq); } -static inline u32 /* units of 100MHz */ -intel_fdi_link_freq(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *pipe_config) +/* units of 100MHz */ +static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *pipe_config) { if (HAS_DDI(dev_priv)) return pipe_config->port_clock; /* SPLL */ @@ -8134,7 +8134,7 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) } } -static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) +static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { if (i915_modparams.panel_use_ssc >= 0) return i915_modparams.panel_use_ssc != 0; @@ -12827,7 +12827,7 @@ static void intel_dump_crtc_timings(struct drm_i915_private *i915, mode->type, mode->flags); } -static inline void +static void intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, const char *id, unsigned int lane_count, const struct intel_link_m_n *m_n) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 2293de009f56..721e9ba96d34 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -4515,9 +4515,8 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } -static inline -bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, - i915_reg_t reg, bool enable) +static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, + i915_reg_t reg, bool enable) { u32 val, status; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ec1157858ac5..66f8a9d1503d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -6895,9 +6895,9 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 0, 0 }, }; -static inline -int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, - u8 *rx_status) +static int +intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, + u8 *rx_status) { struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index d7a6bf2277df..29fec6a92d17 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -34,7 +34,7 @@ #define DSB_BYTE_EN_SHIFT 20 #define DSB_REG_VALUE_MASK 0xfffff -static inline bool is_dsb_busy(struct intel_dsb *dsb) +static bool 
is_dsb_busy(struct intel_dsb *dsb) { struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -43,7 +43,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb) return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); } -static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb) +static bool intel_dsb_enable_engine(struct intel_dsb *dsb) { struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -63,7 +63,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb) return true; } -static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb) +static bool intel_dsb_disable_engine(struct intel_dsb *dsb) { struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 3c9c05478a03..eed037ec0b29 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -121,7 +121,7 @@ struct i2c_adapter_lookup { #define ICL_GPIO_DDPA_CTRLCLK_2 8 #define ICL_GPIO_DDPA_CTRLDATA_2 9 -static inline enum port intel_dsi_seq_port_to_port(u8 port) +static enum port intel_dsi_seq_port_to_port(u8 port) { return port ? PORT_C : PORT_A; } diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 1fd3a5a6296b..a8d119b6b45c 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -379,8 +379,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) return ret; } -static inline -unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv) +static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv) { return INTEL_GEN(dev_priv) >= 9 ? 
GEN9_GMBUS_BYTE_COUNT_MAX : GMBUS_BYTE_COUNT_MAX; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index d3ad10653b2e..2cbc4619b4ce 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -109,18 +109,16 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return capable; } -static inline -bool intel_hdcp_in_use(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder, enum port port) +static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { return intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) & HDCP_STATUS_ENC; } -static inline -bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder, enum port port) +static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { return intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & @@ -853,8 +851,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector) return ret; } -static inline -struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) +static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) { return container_of(hdcp, struct intel_connector, hdcp); } @@ -1856,8 +1853,7 @@ static const struct component_ops i915_hdcp_component_ops = { .unbind = i915_hdcp_component_unbind, }; -static inline -enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) +static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) { switch (port) { case PORT_A: @@ -1869,8 +1865,7 @@ enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) } } -static inline -enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) +static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) { switch (cpu_transcoder) { case TRANSCODER_A ... 
TRANSCODER_D: @@ -1880,8 +1875,8 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) } } -static inline int initialize_hdcp_port_data(struct intel_connector *connector, - const struct intel_hdcp_shim *shim) +static int initialize_hdcp_port_data(struct intel_connector *connector, + const struct intel_hdcp_shim *shim) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 54412f79f0c5..cdf9dedca36b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1614,10 +1614,10 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired) return -EINVAL; } -static inline -int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, - u8 msg_id, bool *msg_ready, - ssize_t *msg_sz) +static int +hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, + u8 msg_id, bool *msg_ready, + ssize_t *msg_sz) { struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 08bfecfbe681..bcd2cc1aba90 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -287,7 +287,7 @@ centre_vertically(struct drm_display_mode *adjusted_mode, adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width; } -static inline u32 panel_fitter_scaling(u32 source, u32 target) +static u32 panel_fitter_scaling(u32 source, u32 target) { /* * Floating point operation is not supported. So the FACTOR @@ -484,8 +484,8 @@ static u32 scale(u32 source_val, } /* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */ -static inline u32 scale_user_to_hw(struct intel_connector *connector, - u32 user_level, u32 user_max) +static u32 scale_user_to_hw(struct intel_connector *connector, + u32 user_level, u32 user_max) { struct intel_panel *panel = &connector->panel; @@ -495,8 +495,8 @@ static inline u32 scale_user_to_hw(struct intel_connector *connector, /* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result * to [hw_min..hw_max]. */ -static inline u32 clamp_user_to_hw(struct intel_connector *connector, - u32 user_level, u32 user_max) +static u32 clamp_user_to_hw(struct intel_connector *connector, + u32 user_level, u32 user_max) { struct intel_panel *panel = &connector->panel; u32 hw_level; @@ -508,8 +508,8 @@ static inline u32 clamp_user_to_hw(struct intel_connector *connector, } /* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. 
*/ -static inline u32 scale_hw_to_user(struct intel_connector *connector, - u32 hw_level, u32 user_max) +static u32 scale_hw_to_user(struct intel_connector *connector, + u32 hw_level, u32 user_max) { struct intel_panel *panel = &connector->panel; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d54833a1578f..6f40bfee7304 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5455,8 +5455,8 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); } -static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, - const struct skl_ddb_entry *b) +static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, + const struct skl_ddb_entry *b) { return a->start < b->end && b->start < a->end; } @@ -5907,8 +5907,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state, mutex_unlock(&dev_priv->wm.wm_mutex); } -static inline void skl_wm_level_from_reg_val(u32 val, - struct skl_wm_level *level) +static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) { level->plane_en = val & PLANE_WM_EN; level->ignore_lines = val & PLANE_WM_IGNORE_LINES; diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 3f13baaef058..14daf6af6854 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -336,7 +336,7 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value, intel_sbi_rw(i915, reg, destination, &value, false); } -static inline int gen6_check_mailbox_status(u32 mbox) +static int gen6_check_mailbox_status(u32 mbox) { switch (mbox & GEN6_PCODE_ERROR_MASK) { case GEN6_PCODE_SUCCESS: @@ -356,7 +356,7 @@ static inline int gen6_check_mailbox_status(u32 mbox) } } -static inline int gen7_check_mailbox_status(u32 mbox) +static int gen7_check_mailbox_status(u32 mbox) { switch (mbox & GEN6_PCODE_ERROR_MASK) { case GEN6_PCODE_SUCCESS: diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 2186386a45c8..6942487c14a9 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -89,7 +89,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm) drm_dbg(&i915->drm, "WOPCM: %uK\n", wopcm->size / 1024); } -static inline u32 context_reserved_size(struct drm_i915_private *i915) +static u32 context_reserved_size(struct drm_i915_private *i915) { if (IS_GEN9_LP(i915)) return BXT_WOPCM_RC6_CTX_RESERVED; @@ -99,8 +99,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915) return 0; } -static inline bool gen9_check_dword_gap(struct drm_i915_private *i915, - u32 guc_wopcm_base, u32 guc_wopcm_size) +static bool gen9_check_dword_gap(struct drm_i915_private *i915, + u32 guc_wopcm_base, u32 guc_wopcm_size) { u32 offset; @@ -122,8 +122,8 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915, return true; } -static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915, - u32 guc_wopcm_size, u32 huc_fw_size) +static bool gen9_check_huc_fw_fits(struct drm_i915_private *i915, + u32 guc_wopcm_size, u32 huc_fw_size) { /* * On Gen9 & CNL A0, hardware requires the total available GuC WOPCM @@ -141,9 +141,9 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915, return true; } -static inline bool check_hw_restrictions(struct drm_i915_private *i915, - u32 guc_wopcm_base, u32 guc_wopcm_size, - u32 huc_fw_size) +static bool 
check_hw_restrictions(struct drm_i915_private *i915, + u32 guc_wopcm_base, u32 guc_wopcm_size, + u32 huc_fw_size) { if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base, guc_wopcm_size)) @@ -157,9 +157,9 @@ static inline bool check_hw_restrictions(struct drm_i915_private *i915, return true; } -static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size, - u32 guc_wopcm_base, u32 guc_wopcm_size, - u32 guc_fw_size, u32 huc_fw_size) +static bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size, + u32 guc_wopcm_base, u32 guc_wopcm_size, + u32 guc_fw_size, u32 huc_fw_size) { const u32 ctx_rsvd = context_reserved_size(i915); u32 size; From 11ebc2321b8ef88f549c5b8bf61ed4d00fc7952a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 20 Apr 2020 16:16:32 +0300 Subject: [PATCH 054/121] drm/i915/audio: fix compressed_bpp check The early check for compressed_bpp being zero is too early, as it is hit also when DSC is not enabled. Move the checks down to where the values are actually needed. This is a paranoid check for a situation that should not happen, so we don't really care about handling it gracefully apart from not oopsing. Fixes: 48b8b04c791d ("drm/i915/display: Enable DP Display Audio WA") Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1750 Cc: Anshuman Gupta Cc: Uma Shankar Reviewed-by: Uma Shankar Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200420131632.23283-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_audio.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 2663e71059af..36aaee8536f1 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -542,6 +542,10 @@ static unsigned int get_hblank_early_enable_config(struct intel_encoder *encoder h_active, crtc_state->port_clock, crtc_state->lane_count, vdsc_bpp, cdclk); + if (WARN_ON(!crtc_state->port_clock || !crtc_state->lane_count || + !crtc_state->dsc.compressed_bpp || !i915->cdclk.hw.cdclk)) + return 0; + link_clks_available = ((((h_total - h_active) * ((crtc_state->port_clock * ROUNDING_FACTOR) / pixel_clk)) / ROUNDING_FACTOR) - 28); @@ -597,21 +601,12 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder, struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; - unsigned int hblank_early_prog, samples_room, h_active; + unsigned int hblank_early_prog, samples_room; unsigned int val; if (INTEL_GEN(i915) < 11) return; - h_active = crtc_state->hw.adjusted_mode.hdisplay; - - if (!(h_active && crtc_state->port_clock && crtc_state->lane_count && - crtc_state->dsc.compressed_bpp && i915->cdclk.hw.cdclk)) { - drm_err(&i915->drm, "Null Params rcvd for hblank early enabling\n"); - WARN_ON(1); - return; - } - val = intel_de_read(i915, AUD_CONFIG_BE); if (INTEL_GEN(i915) == 11) From 61198fe1bf48246d20de9d608a89687932e4dad6 Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:43 +0530 Subject: [PATCH 055/121] drm/i915/display/icl_dsi: Prefer drm_WARN_ON over WARN_ON struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN_ON over WARN_ON. 
Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-2-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index a8d2daa214e6..73ebd387f549 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1195,7 +1195,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state, { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - WARN_ON(crtc_state->has_pch_encoder); + drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); /* step6d: enable dsi transcoder */ gen11_dsi_enable_transcoder(encoder); From 1e6850ee4c44cfb6fea9c4f3f7034f815c7874dd Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:44 +0530 Subject: [PATCH 056/121] drm/i915/display/atomic_plane: Prefer drm_WARN_ON over WARN_ON struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN_ON over WARN_ON. Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-3-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 25dfeb3197aa..79032701873a 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -125,7 +125,7 @@ intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { struct intel_plane_state *plane_state = to_intel_plane_state(state); - WARN_ON(plane_state->vma); + drm_WARN_ON(plane->dev, plane_state->vma); __drm_atomic_helper_plane_destroy_state(&plane_state->uapi); if (plane_state->hw.fb) @@ -396,7 +396,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, } /* should never happen */ - WARN_ON(1); + drm_WARN_ON(state->base.dev, 1); return NULL; } From 8b4f2137cc0fac74821cc097fe7b7e0110ea0762 Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:45 +0530 Subject: [PATCH 057/121] drm/i915/display/ddi: Prefer drm_WARN* over WARN* struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN* over WARN* calls. 
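To illustrate the conversion this series applies, a minimal sketch (the helper below is hypothetical and not taken from any patch here; only the WARN_ON and drm_WARN_ON macros themselves are real):

/* Hypothetical helper, for illustration only. */
static void sanity_check(struct drm_i915_private *i915, bool broken)
{
	/* Before: the backtrace gives no hint which GPU hit the check. */
	WARN_ON(broken);

	/* After: the warning is tagged with the originating drm_device. */
	drm_WARN_ON(&i915->drm, broken);
}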
Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-4-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index e6e963f7fdf9..c086ae5eb12f 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2635,8 +2635,9 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder, tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level); } -static u32 translate_signal_level(int signal_levels) +static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int i; for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) { @@ -2644,8 +2645,9 @@ static u32 translate_signal_level(int signal_levels) return i; } - WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n", - signal_levels); + drm_WARN(&i915->drm, 1, + "Unsupported voltage swing/pre-emphasis level: 0x%x\n", + signal_levels); return 0; } @@ -2656,7 +2658,7 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp) int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); - return translate_signal_level(signal_levels); + return translate_signal_level(intel_dp, signal_levels); } u32 bxt_signal_levels(struct intel_dp *intel_dp) @@ -3743,7 +3745,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - WARN_ON(crtc_state->has_pch_encoder); + drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); intel_ddi_enable_transcoder_func(encoder, crtc_state); @@ -3856,7 +3858,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL; int required_lanes = crtc_state ? crtc_state->lane_count : 1; - WARN_ON(crtc && crtc->active); + drm_WARN_ON(state->base.dev, crtc && crtc->active); intel_tc_port_get_link(enc_to_dig_port(encoder), required_lanes); From ce04ecd9cf1e89385e72e43e349cda48c76ab1e2 Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:46 +0530 Subject: [PATCH 058/121] drm/i915/display/display: Prefer drm_WARN_ON over WARN_ON struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN_ON over WARN_ON at places where struct drm_device pointer can be extracted. 
Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-5-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 24 ++++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3cfedeaf0846..adb08a00bb57 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1973,16 +1973,16 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) { - WARN_ON(!is_ccs_modifier(fb->modifier) || - (main_plane && main_plane >= fb->format->num_planes / 2)); + drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + (main_plane && main_plane >= fb->format->num_planes / 2)); return fb->format->num_planes / 2 + main_plane; } static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) { - WARN_ON(!is_ccs_modifier(fb->modifier) || - ccs_plane < fb->format->num_planes / 2); + drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + ccs_plane < fb->format->num_planes / 2); return ccs_plane - fb->format->num_planes / 2; } @@ -2992,7 +2992,7 @@ setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info, fb->modifier != I915_FORMAT_MOD_Yf_TILED) return 0; - if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane))) + if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane))) return 0; rot_info->plane[plane] = *plane_info; @@ -6626,7 +6626,7 @@ intel_connector_primary_encoder(struct intel_connector *connector) return &dp_to_dig_port(connector->mst_port)->base; encoder = intel_attached_encoder(connector); - WARN_ON(!encoder); + drm_WARN_ON(connector->base.dev, !encoder); return encoder; } @@ -7958,7 +7958,8 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) if (pipe_h < pfit_h) pipe_h = pfit_h; - if (WARN_ON(!pfit_w || !pfit_h)) + if (drm_WARN_ON(pipe_config->uapi.crtc->dev, + !pfit_w || !pfit_h)) return pixel_rate; pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), @@ -12438,8 +12439,10 @@ static int icl_add_linked_planes(struct intel_atomic_state *state) if (IS_ERR(linked_plane_state)) return PTR_ERR(linked_plane_state); - WARN_ON(linked_plane_state->planar_linked_plane != plane); - WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave); + drm_WARN_ON(state->base.dev, + linked_plane_state->planar_linked_plane != plane); + drm_WARN_ON(state->base.dev, + linked_plane_state->planar_slave == plane_state->planar_slave); } return 0; @@ -13162,7 +13165,8 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state { crtc_state->uapi.enable = crtc_state->hw.enable; crtc_state->uapi.active = crtc_state->hw.active; - WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); + drm_WARN_ON(crtc_state->uapi.crtc->dev, + drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; From 4ad53ededf0bc53bedae1ddc2b3e16a2b86faec5 Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:49 +0530 Subject: [PATCH 059/121] drm/i915/display/dpll_mgr: Prefer drm_WARN_ON over WARN_ON struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. 
Prefer drm_WARN_ON over WARN_ON at places where struct drm_device pointer can be extracted. Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-8-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 2d47f1f756a2..b45185b80bec 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -80,7 +80,7 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) { struct intel_atomic_state *state = to_intel_atomic_state(s); - WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); + drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); if (!state->dpll_set) { state->dpll_set = true; @@ -979,7 +979,7 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (WARN_ON(crtc_state->port_clock / 2 != 135000)) + if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000)) return NULL; crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | @@ -1616,7 +1616,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * ref_clock / 0x8000; - if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) + if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0)) return 0; return dco_freq / (p0 * p1 * p2 * 5); @@ -2074,7 +2074,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, clk_div->p1 = best_clock.p1; clk_div->p2 = best_clock.p2; - WARN_ON(best_clock.m1 != 2); + drm_WARN_ON(&i915->drm, best_clock.m1 != 2); clk_div->n = best_clock.n; clk_div->m2_int = best_clock.m2 >> 22; clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1); From a7f2ad392928e7e01812641c8d696b21e01171f5 Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:50 +0530 Subject: [PATCH 060/121] drm/i915/display/frontbuffer: Prefer drm_WARN_ON over WARN_ON struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN_ON over WARN_ON. 
Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-9-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 6cb02c912acc..2979ed2588eb 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -302,12 +302,14 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, BITS_PER_TYPE(atomic_t)); if (old) { - WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits)); + drm_WARN_ON(old->obj->base.dev, + !(atomic_read(&old->bits) & frontbuffer_bits)); atomic_andnot(frontbuffer_bits, &old->bits); } if (new) { - WARN_ON(atomic_read(&new->bits) & frontbuffer_bits); + drm_WARN_ON(new->obj->base.dev, + atomic_read(&new->bits) & frontbuffer_bits); atomic_or(frontbuffer_bits, &new->bits); } } From 8d641574f3f057a49ff1e28fd306ab4fa06b7fa4 Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:51 +0530 Subject: [PATCH 061/121] drm/i915/display/global_state: Prefer drm_WARN* over WARN* struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN* over WARN* calls. Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-10-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/intel_global_state.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c index 6f72feb14f3e..212d4ee68205 100644 --- a/drivers/gpu/drm/i915/display/intel_global_state.c +++ b/drivers/gpu/drm/i915/display/intel_global_state.c @@ -64,7 +64,7 @@ static void assert_global_state_read_locked(struct intel_atomic_state *state) return; } - WARN(1, "Global state not read locked\n"); + drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n"); } struct intel_global_state * @@ -148,7 +148,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state) for_each_oldnew_global_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { - WARN_ON(obj->state != old_obj_state); + drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state); /* * If the new state wasn't modified (and properly From e278f07679b022c47bbfe592da11a2215739bcfc Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:52 +0530 Subject: [PATCH 062/121] drm/i915/display/overlay: Prefer drm_WARN_ON over WARN_ON struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN_ON over WARN_ON. 
Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-11-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/intel_overlay.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 6e1d66323223..66711e62fa71 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -281,7 +281,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, enum pipe pipe = overlay->crtc->pipe; struct intel_frontbuffer *from = NULL, *to = NULL; - WARN_ON(overlay->old_vma); + drm_WARN_ON(&overlay->i915->drm, overlay->old_vma); if (overlay->vma) from = intel_frontbuffer_get(overlay->vma->obj); @@ -350,7 +350,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay) struct i915_vma *vma; vma = fetch_and_zero(&overlay->old_vma); - if (WARN_ON(!vma)) + if (drm_WARN_ON(&overlay->i915->drm, !vma)) return; intel_frontbuffer_flip_complete(overlay->i915, @@ -396,7 +396,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) struct i915_request *rq; u32 *cs, flip_addr = overlay->flip_addr; - WARN_ON(!overlay->active); + drm_WARN_ON(&overlay->i915->drm, !overlay->active); /* According to intel docs the overlay hw may hang (when switching * off) without loading the filter coeffs. It is however unclear whether From 007ff34e61c5874629a7016b4045522c05dbe64b Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Mon, 6 Apr 2020 16:57:55 +0530 Subject: [PATCH 063/121] drm/i915/display/vlv_dsi: Prefer drm_WARN_ON over WARN_ON struct drm_device specific drm_WARN* macros include device information in the backtrace, so we know what device the warnings originate from. Prefer drm_WARN_ON over WARN_ON. Signed-off-by: Pankaj Bharadiya Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200406112800.23762-14-pankaj.laxminarayan.bharadiya@intel.com --- drivers/gpu/drm/i915/display/vlv_dsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 4e18d4627065..46e2895d916d 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -864,7 +864,7 @@ static void bxt_dsi_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - WARN_ON(crtc_state->has_pch_encoder); + drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); intel_crtc_vblank_on(crtc_state); } From 74f103928df70de5e9bf1a7646236986820e7009 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Apr 2020 21:30:40 +0100 Subject: [PATCH 064/121] drm/i915/selftests: Show the pstate limits on any failure to reset min We want to see the pstate limits whenever we fail to set the minimum frequency as that may help for debugging. 
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200420203040.8984-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index d7cd673550ef..e0a791eac752 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -238,6 +238,7 @@ int live_rps_control(void *arg) pr_err("%s: could not set minimum frequency [%x], only %x!\n", engine->name, rps->min_freq, read_cagf(rps)); igt_spinner_end(&spin); + show_pstate_limits(rps); err = -EINVAL; break; } @@ -278,6 +279,7 @@ int live_rps_control(void *arg) if (limit == rps->min_freq) { pr_err("%s: GPU throttled to minimum!\n", engine->name); + show_pstate_limits(rps); err = -ENODEV; break; } From 4ea6b1c456273c6da43a503c55f9eaec2da331cf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Apr 2020 13:46:36 +0100 Subject: [PATCH 065/121] drm/i915/selftests: Show the full scaling curve on failure If we detect that the RPS end points do not scale perfectly, take the time to measure all the in between values as well. We are aborting the test, so we might as well spend the available time gathering critical debug information instead. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200421124636.22554-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 40 ++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index e0a791eac752..395265121e43 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -484,11 +484,31 @@ int live_rps_frequency_cs(void *arg) if (!scaled_within(max.freq * min.count, min.freq * max.count, 2, 3)) { + int f; + pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n", engine->name, max.freq * min.count, min.freq * max.count); show_pcu_config(rps); + + for (f = min.freq + 1; f <= rps->max_freq; f++) { + int act = f; + u64 count; + + count = measure_cs_frequency_at(rps, engine, &act); + if (act < f) + break; + + pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n", + engine->name, + act, intel_gpu_freq(rps, act), count, + (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count, + act * min.count)); + + f = act; /* may skip ahead [pcu granularity] */ + } + err = -EINVAL; } @@ -593,11 +613,31 @@ int live_rps_frequency_srm(void *arg) if (!scaled_within(max.freq * min.count, min.freq * max.count, 1, 2)) { + int f; + pr_err("%s: CS did not scale with frequency! 
scaled min:%llu, max:%llu\n", engine->name, max.freq * min.count, min.freq * max.count); show_pcu_config(rps); + + for (f = min.freq + 1; f <= rps->max_freq; f++) { + int act = f; + u64 count; + + count = measure_frequency_at(rps, cntr, &act); + if (act < f) + break; + + pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n", + engine->name, + act, intel_gpu_freq(rps, act), count, + (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count, + act * min.count)); + + f = act; /* may skip ahead [pcu granularity] */ + } + err = -EINVAL; } From cf9ba27840c26f649a90bb04db5b455d47810292 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Apr 2020 15:22:36 +0100 Subject: [PATCH 066/121] drm/i915/selftests: Disable C-states when measuring RPS frequency response Let's isolate the impact of cpu frequency selection on determining the GPU throughput in response to selection of RPS frequencies. For real systems, we do have to be concerned with the impact of integrating c-states, p-states and rp-states, but for the sake of proving whether or not RPS works, one baby step at a time. For the record, as one would hope, it does not seem to impact on the measured performance, but we do it anyway to reduce the number of variables. Later, we can extend the testing to encourage the cpu/pkg to try and sleep while the GPU is busy. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200421142236.8614-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 395265121e43..e2afc2003caa 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -3,6 +3,7 @@ * Copyright © 2020 Intel Corporation */ +#include #include #include "intel_engine_pm.h" @@ -14,6 +15,9 @@ #include "selftests/igt_spinner.h" #include "selftests/librapl.h" +/* Try to isolate the impact of cstates from determing frequency response */ +#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */ + static void dummy_rps_work(struct work_struct *wrk) { } @@ -406,6 +410,7 @@ int live_rps_frequency_cs(void *arg) struct intel_gt *gt = arg; struct intel_rps *rps = >->rps; struct intel_engine_cs *engine; + struct pm_qos_request qos; enum intel_engine_id id; int err = 0; @@ -421,6 +426,9 @@ int live_rps_frequency_cs(void *arg) if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ return 0; + if (CPU_LATENCY >= 0) + cpu_latency_qos_add_request(&qos, CPU_LATENCY); + intel_gt_pm_wait_for_idle(gt); saved_work = rps->work.func; rps->work.func = dummy_rps_work; @@ -527,6 +535,9 @@ err_vma: intel_gt_pm_wait_for_idle(gt); rps->work.func = saved_work; + if (CPU_LATENCY >= 0) + cpu_latency_qos_remove_request(&qos); + return err; } @@ -536,6 +547,7 @@ int live_rps_frequency_srm(void *arg) struct intel_gt *gt = arg; struct intel_rps *rps = >->rps; struct intel_engine_cs *engine; + struct pm_qos_request qos; enum intel_engine_id id; int err = 0; @@ -551,6 +563,9 @@ int live_rps_frequency_srm(void *arg) if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ return 0; + if (CPU_LATENCY >= 0) + cpu_latency_qos_add_request(&qos, CPU_LATENCY); + intel_gt_pm_wait_for_idle(gt); saved_work = rps->work.func; rps->work.func = dummy_rps_work; @@ -656,6 +671,9 @@ err_vma: intel_gt_pm_wait_for_idle(gt); 
rps->work.func = saved_work; + if (CPU_LATENCY >= 0) + cpu_latency_qos_remove_request(&qos); + return err; } From bd3ec9e75893dacfa17f37c7f2bf1c7ed73d4043 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Apr 2020 10:25:04 +0100 Subject: [PATCH 067/121] drm/i915/gt: Poison residual state [HWSP] across resume. Since we may lose the content of any buffer when we relinquish control of the system (e.g. suspend/resume), we have to be careful not to rely on regaining control. A good method to detect when we might be using garbage is by always injecting that garbage prior to first use on load/resume/etc. v2: Drop sanitize callback on cleanup v3: Move seqno reset to timeline enter, so we reset all timelines. However, this is done on every activation during runtime and not reset. The similar level of paranoia we apply to correcting context state after a period of inactivity. Suggested-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Venkata Ramana Nayana Cc: Daniele Ceraolo Spurio Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200421092504.7416-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 23 ++++++++++++++++++++++- drivers/gpu/drm/i915/gt/intel_timeline.c | 17 ++++++++++++++++- drivers/gpu/drm/i915/gt/intel_timeline.h | 2 ++ 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 34f67eb9bfa1..d42a9d6767d4 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3649,7 +3649,26 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) static void execlists_sanitize(struct intel_engine_cs *engine) { + /* + * Poison residual state on resume, in case the suspend didn't! + * + * We have to assume that across suspend/resume (or other loss + * of control) that the contents of our pinned buffers has been + * lost, replaced by garbage. Since this doesn't always happen, + * let's poison such state so that we more quickly spot when + * we falsely assume it has been preserved. + */ + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); + reset_csb_pointers(engine); + + /* + * The kernel_context HWSP is stored in the status_page. As above, + * that may be lost on resume/initialisation, and so we need to + * reset the value in the HWSP. + */ + intel_timeline_reset_seqno(engine->kernel_context->timeline); } static void enable_error_interrupt(struct intel_engine_cs *engine) @@ -4539,6 +4558,8 @@ static void execlists_shutdown(struct intel_engine_cs *engine) static void execlists_release(struct intel_engine_cs *engine) { + engine->sanitize = NULL; /* no longer in control, nothing to sanitize */ + execlists_shutdown(engine); intel_engine_cleanup_common(engine); @@ -4550,7 +4571,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ - engine->sanitize = execlists_sanitize; engine->resume = execlists_resume; engine->cops = &execlists_context_ops; @@ -4666,6 +4686,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) execlists->csb_size = GEN11_CSB_ENTRIES; /* Finally, take ownership and responsibility for cleanup! 
*/ + engine->sanitize = execlists_sanitize; engine->release = execlists_release; return 0; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 3779c2ae0d65..29a39e44fa36 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -337,6 +337,13 @@ int intel_timeline_pin(struct intel_timeline *tl) return 0; } +void intel_timeline_reset_seqno(const struct intel_timeline *tl) +{ + /* Must be pinned to be writable, and no requests in flight. */ + GEM_BUG_ON(!atomic_read(&tl->pin_count)); + WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); +} + void intel_timeline_enter(struct intel_timeline *tl) { struct intel_gt_timelines *timelines = &tl->gt->timelines; @@ -365,8 +372,16 @@ void intel_timeline_enter(struct intel_timeline *tl) return; spin_lock(&timelines->lock); - if (!atomic_fetch_inc(&tl->active_count)) + if (!atomic_fetch_inc(&tl->active_count)) { + /* + * The HWSP is volatile, and may have been lost while inactive, + * e.g. across suspend/resume. Be paranoid, and ensure that + * the HWSP value matches our seqno so we don't proclaim + * the next request as already complete. + */ + intel_timeline_reset_seqno(tl); list_add_tail(&tl->link, &timelines->active_list); + } spin_unlock(&timelines->lock); } diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h index f5b7eade3809..c8e59a333182 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -84,6 +84,8 @@ int intel_timeline_get_seqno(struct intel_timeline *tl, void intel_timeline_exit(struct intel_timeline *tl); void intel_timeline_unpin(struct intel_timeline *tl); +void intel_timeline_reset_seqno(const struct intel_timeline *tl); + int intel_timeline_read_hwsp(struct i915_request *from, struct i915_request *until, u32 *hwsp_offset); From 33883310cd8ed365a4279600b329c50992e8f528 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Apr 2020 18:13:51 +0100 Subject: [PATCH 068/121] drm/i915/selftests: Unroll the CS frequency loop Having noticed that MI_BB_START is incurring a memory stall (see the correlation with uncore frequency), we have to unroll the loop in order to diminish the impact of the MI_BB_START on the instruction throughput. 
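The same idea expressed as a plain C sketch (illustrative only, not the selftest code): by repeating the counting body many times between backwards branches, the branch cost no longer dominates the work being measured.

/* Sketch of loop unrolling, analogous to emitting 1024 MI_MATH blocks
 * per MI_BB_START in the unrolled batch below.
 */
static unsigned long spin_count(const volatile bool *stop)
{
	unsigned long count = 0;

	while (!*stop) {
		/* body repeated to dilute the per-iteration branch cost */
		count++; count++; count++; count++;
		count++; count++; count++; count++;
	}

	return count;
}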
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200421171351.19575-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 31 ++++++++++++++++---------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index e2afc2003caa..0d7ed000aff0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -49,14 +49,17 @@ create_spin_counter(struct intel_engine_cs *engine, #define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x) struct drm_i915_gem_object *obj; struct i915_vma *vma; + unsigned long end; u32 *base, *cs; int loop, i; int err; - obj = i915_gem_object_create_internal(vm->i915, 4096); + obj = i915_gem_object_create_internal(vm->i915, 64 << 10); if (IS_ERR(obj)) return ERR_CAST(obj); + end = obj->base.size / sizeof(u32) - 1; + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { i915_gem_object_put(obj); @@ -90,27 +93,31 @@ create_spin_counter(struct intel_engine_cs *engine, loop = cs - base; - *cs++ = MI_MATH(4); - *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(COUNT)); - *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(INC)); - *cs++ = MI_MATH_ADD; - *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU); + /* Unroll the loop to avoid MI_BB_START stalls impacting measurements */ + for (i = 0; i < 1024; i++) { + *cs++ = MI_MATH(4); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(COUNT)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(INC)); + *cs++ = MI_MATH_ADD; + *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU); - if (srm) { - *cs++ = MI_STORE_REGISTER_MEM_GEN8; - *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT)); - *cs++ = lower_32_bits(vma->node.start + 1000 * sizeof(*cs)); - *cs++ = upper_32_bits(vma->node.start + 1000 * sizeof(*cs)); + if (srm) { + *cs++ = MI_STORE_REGISTER_MEM_GEN8; + *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT)); + *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs)); + *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs)); + } } *cs++ = MI_BATCH_BUFFER_START_GEN8; *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs)); *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs)); + GEM_BUG_ON(cs - base > end); i915_gem_object_flush_map(obj); *cancel = base + loop; - *counter = srm ? memset32(base + 1000, 0, 1) : NULL; + *counter = srm ? memset32(base + end, 0, 1) : NULL; return vma; } From cbb6f8805a4cfcf6bd97dfd0de399f9f3f8f7cdc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 09:38:55 +0100 Subject: [PATCH 069/121] drm/i915/selftests: Disable heartbeat around RPS interrupt testing To verify that we receive the EI interrupts, we need to hold the GPU in very precise conditions (in terms of C0 cycles during the EI). If we preempt the busy load to handle the heartbeat, this may perturb the busy load, causing us to miss the interrupt. The other tests, while not as time-sensitive, may also be slightly perturbed, so apply the heartbeat protection across all the measurements. 
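A rough sketch of how the helpers added below are meant to wrap a sensitive measurement (run_measurement() is a placeholder, not a real function):

/* Sketch only: save, disable, measure, restore. */
static int measure_without_heartbeat(struct intel_engine_cs *engine)
{
	unsigned long saved_heartbeat = engine_heartbeat_disable(engine);
	int err;

	err = run_measurement(engine); /* placeholder for the RPS probe */

	engine_heartbeat_enable(engine, saved_heartbeat);
	return err;
}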
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200422083855.26842-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 59 ++++++++++++++++++++++++-- 1 file changed, 55 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 0d7ed000aff0..9d9c8e0aa2e7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -6,6 +6,7 @@ #include #include +#include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" #include "intel_gpu_commands.h" #include "intel_gt_pm.h" @@ -18,6 +19,26 @@ /* Try to isolate the impact of cstates from determing frequency response */ #define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */ +static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine) +{ + unsigned long old; + + old = fetch_and_zero(&engine->props.heartbeat_interval_ms); + + intel_engine_pm_get(engine); + intel_engine_park_heartbeat(engine); + + return old; +} + +static void engine_heartbeat_enable(struct intel_engine_cs *engine, + unsigned long saved) +{ + intel_engine_pm_put(engine); + + engine->props.heartbeat_interval_ms = saved; +} + static void dummy_rps_work(struct work_struct *wrk) { } @@ -218,6 +239,7 @@ int live_rps_control(void *arg) intel_gt_pm_get(gt); for_each_engine(engine, gt, id) { + unsigned long saved_heartbeat; struct i915_request *rq; ktime_t min_dt, max_dt; int f, limit; @@ -226,6 +248,8 @@ int live_rps_control(void *arg) if (!intel_engine_can_store_dword(engine)) continue; + saved_heartbeat = engine_heartbeat_disable(engine); + rq = igt_spinner_create_request(&spin, engine->kernel_context, MI_NOOP); @@ -240,6 +264,7 @@ int live_rps_control(void *arg) pr_err("%s: RPS spinner did not start\n", engine->name); igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); intel_gt_set_wedged(engine->gt); err = -EIO; break; @@ -249,6 +274,7 @@ int live_rps_control(void *arg) pr_err("%s: could not set minimum frequency [%x], only %x!\n", engine->name, rps->min_freq, read_cagf(rps)); igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); show_pstate_limits(rps); err = -EINVAL; break; @@ -265,6 +291,7 @@ int live_rps_control(void *arg) pr_err("%s: could not restore minimum frequency [%x], only %x!\n", engine->name, rps->min_freq, read_cagf(rps)); igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); show_pstate_limits(rps); err = -EINVAL; break; @@ -279,6 +306,7 @@ int live_rps_control(void *arg) min_dt = ktime_sub(ktime_get(), min_dt); igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n", engine->name, @@ -441,6 +469,7 @@ int live_rps_frequency_cs(void *arg) rps->work.func = dummy_rps_work; for_each_engine(engine, gt, id) { + unsigned long saved_heartbeat; struct i915_request *rq; struct i915_vma *vma; u32 *cancel, *cntr; @@ -449,11 +478,14 @@ int live_rps_frequency_cs(void *arg) int freq; } min, max; + saved_heartbeat = engine_heartbeat_disable(engine); + vma = create_spin_counter(engine, engine->kernel_context->vm, false, &cancel, &cntr); if (IS_ERR(vma)) { err = PTR_ERR(vma); + engine_heartbeat_enable(engine, saved_heartbeat); break; } @@ -533,6 +565,7 @@ err_vma: i915_vma_unpin(vma); i915_vma_put(vma); + engine_heartbeat_enable(engine, saved_heartbeat); if (igt_flush_test(gt->i915)) err = -EIO; if 
(err) @@ -578,6 +611,7 @@ int live_rps_frequency_srm(void *arg) rps->work.func = dummy_rps_work; for_each_engine(engine, gt, id) { + unsigned long saved_heartbeat; struct i915_request *rq; struct i915_vma *vma; u32 *cancel, *cntr; @@ -586,11 +620,14 @@ int live_rps_frequency_srm(void *arg) } min, max; + saved_heartbeat = engine_heartbeat_disable(engine); + vma = create_spin_counter(engine, engine->kernel_context->vm, true, &cancel, &cntr); if (IS_ERR(vma)) { err = PTR_ERR(vma); + engine_heartbeat_enable(engine, saved_heartbeat); break; } @@ -669,6 +706,7 @@ err_vma: i915_vma_unpin(vma); i915_vma_put(vma); + engine_heartbeat_enable(engine, saved_heartbeat); if (igt_flush_test(gt->i915)) err = -EIO; if (err) @@ -858,12 +896,16 @@ int live_rps_interrupt(void *arg) for_each_engine(engine, gt, id) { /* Keep the engine busy with a spinner; expect an UP! */ if (pm_events & GEN6_PM_RP_UP_THRESHOLD) { + unsigned long saved_heartbeat; + intel_gt_pm_wait_for_idle(engine->gt); GEM_BUG_ON(rps->active); - intel_engine_pm_get(engine); + saved_heartbeat = engine_heartbeat_disable(engine); + err = __rps_up_interrupt(rps, engine, &spin); - intel_engine_pm_put(engine); + + engine_heartbeat_enable(engine, saved_heartbeat); if (err) goto out; @@ -872,13 +914,15 @@ int live_rps_interrupt(void *arg) /* Keep the engine awake but idle and check for DOWN */ if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) { - intel_engine_pm_get(engine); + unsigned long saved_heartbeat; + + saved_heartbeat = engine_heartbeat_disable(engine); intel_rc6_disable(>->rc6); err = __rps_down_interrupt(rps, engine); intel_rc6_enable(>->rc6); - intel_engine_pm_put(engine); + engine_heartbeat_enable(engine, saved_heartbeat); if (err) goto out; } @@ -954,6 +998,7 @@ int live_rps_power(void *arg) rps->work.func = dummy_rps_work; for_each_engine(engine, gt, id) { + unsigned long saved_heartbeat; struct i915_request *rq; struct { u64 power; @@ -963,10 +1008,13 @@ int live_rps_power(void *arg) if (!intel_engine_can_store_dword(engine)) continue; + saved_heartbeat = engine_heartbeat_disable(engine); + rq = igt_spinner_create_request(&spin, engine->kernel_context, MI_NOOP); if (IS_ERR(rq)) { + engine_heartbeat_enable(engine, saved_heartbeat); err = PTR_ERR(rq); break; } @@ -976,6 +1024,8 @@ int live_rps_power(void *arg) if (!igt_wait_for_spinner(&spin, rq)) { pr_err("%s: RPS spinner did not start\n", engine->name); + igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); intel_gt_set_wedged(engine->gt); err = -EIO; break; } @@ -988,6 +1038,7 @@ int live_rps_power(void *arg) min.power = measure_power_at(rps, &min.freq); igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n", engine->name, From c92724de6db1be85be679a58af25659ba696e64d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 11:09:03 +0100 Subject: [PATCH 070/121] drm/i915/selftests: Try to detect rollback during batchbuffer preemption Since batch buffers dominate execution time, most preemption requests should naturally occur during execution of a batch buffer. We wish to verify that, should a preemption occur within a batch buffer, when we come to restart that batch buffer, it occurs at the interrupted instruction and, most importantly, does not roll back to an earlier point. v2: Do not clear the GPR at the start of the batch, but rely on them being clear for new contexts. 
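The pass/fail criterion described above reduces to a simple invariant, sketched here in plain C (a distillation for illustration, not the selftest itself): every GPR slot is incremented exactly once per batch, so any stored value other than 1 means instructions were re-executed after preemption.

/* Sketch of the invariant only; the selftest below performs the real check. */
static int check_no_rollback(const u32 *result, int clients, int num_gpr)
{
	int i, gpr;

	for (i = 0; i < clients; i++) {
		for (gpr = 1; gpr < num_gpr; gpr++) {
			/* A value > 1 means the increment was replayed. */
			if (result[num_gpr * i + gpr] != 1)
				return -EINVAL;
		}
	}

	return 0;
}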
Suggested-by: Mika Kuoppala Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200422100903.25216-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 329 ++++++++++++++++++++++++- 1 file changed, 328 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 6f5e35afe1b2..fc3f9a248764 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -21,7 +21,8 @@ #include "gem/selftests/mock_context.h" #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) -#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */ +#define NUM_GPR 16 +#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */ static struct i915_vma *create_scratch(struct intel_gt *gt) { @@ -2791,6 +2792,331 @@ static int live_preempt_gang(void *arg) return 0; } +static struct i915_vma * +create_gpr_user(struct intel_engine_cs *engine, + struct i915_vma *result, + unsigned int offset) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 *cs; + int err; + int i; + + obj = i915_gem_object_create_internal(engine->i915, 4096); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, result->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) { + i915_vma_put(vma); + return ERR_PTR(err); + } + + cs = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(cs)) { + i915_vma_put(vma); + return ERR_CAST(cs); + } + + /* All GPR are clear for new contexts. We use GPR(0) as a constant */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = CS_GPR(engine, 0); + *cs++ = 1; + + for (i = 1; i < NUM_GPR; i++) { + u64 addr; + + /* + * Perform: GPR[i]++ + * + * As we read and write into the context saved GPR[i], if + * we restart this batch buffer from an earlier point, we + * will repeat the increment and store a value > 1. 
+ */ + *cs++ = MI_MATH(4); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0)); + *cs++ = MI_MATH_ADD; + *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU); + + addr = result->node.start + offset + i * sizeof(*cs); + *cs++ = MI_STORE_REGISTER_MEM_GEN8; + *cs++ = CS_GPR(engine, 2 * i); + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = i; + *cs++ = lower_32_bits(result->node.start); + *cs++ = upper_32_bits(result->node.start); + } + + *cs++ = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + return vma; +} + +static struct i915_vma *create_global(struct intel_gt *gt, size_t sz) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(gt->i915, sz); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_ggtt_pin(vma, 0, 0); + if (err) { + i915_vma_put(vma); + return ERR_PTR(err); + } + + return vma; +} + +static struct i915_request * +create_gpr_client(struct intel_engine_cs *engine, + struct i915_vma *global, + unsigned int offset) +{ + struct i915_vma *batch, *vma; + struct intel_context *ce; + struct i915_request *rq; + int err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + vma = i915_vma_instance(global->obj, ce->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_ce; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_ce; + + batch = create_gpr_user(engine, vma, offset); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_vma; + } + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_batch; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + i915_vma_unlock(vma); + + i915_vma_lock(batch); + if (!err) + err = i915_request_await_object(rq, batch->obj, false); + if (!err) + err = i915_vma_move_to_active(batch, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + batch->node.start, + PAGE_SIZE, 0); + i915_vma_unlock(batch); + i915_vma_unpin(batch); + + if (!err) + i915_request_get(rq); + i915_request_add(rq); + +out_batch: + i915_vma_put(batch); +out_vma: + i915_vma_unpin(vma); +out_ce: + intel_context_put(ce); + return err ? 
ERR_PTR(err) : rq; +} + +static int preempt_user(struct intel_engine_cs *engine, + struct i915_vma *global, + int id) +{ + struct i915_sched_attr attr = { + .priority = I915_PRIORITY_MAX + }; + struct i915_request *rq; + int err = 0; + u32 *cs; + + rq = intel_engine_create_kernel_request(engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(global); + *cs++ = 0; + *cs++ = id; + + intel_ring_advance(rq, cs); + + i915_request_get(rq); + i915_request_add(rq); + + engine->schedule(rq, &attr); + + if (i915_request_wait(rq, 0, HZ / 2) < 0) + err = -ETIME; + i915_request_put(rq); + + return err; +} + +static int live_preempt_user(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_vma *global; + enum intel_engine_id id; + u32 *result; + int err = 0; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + /* + * In our other tests, we look at preemption in carefully + * controlled conditions in the ringbuffer. Since most of the + * time is spent in user batches, most of our preemptions naturally + * occur there. We want to verify that when we preempt inside a batch + * we continue on from the current instruction and do not roll back + * to the start, or another earlier arbitration point. + * + * To verify this, we create a batch which is a mixture of + * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with + * a few preempting contexts thrown into the mix, we look for any + * repeated instructions (which show up as incorrect values). + */ + + global = create_global(gt, 4096); + if (IS_ERR(global)) + return PTR_ERR(global); + + result = i915_gem_object_pin_map(global->obj, I915_MAP_WC); + if (IS_ERR(result)) { + i915_vma_unpin_and_release(&global, 0); + return PTR_ERR(result); + } + + for_each_engine(engine, gt, id) { + struct i915_request *client[3] = {}; + struct igt_live_test t; + int i; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS) + continue; /* we need per-context GPR */ + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + + memset(result, 0, 4096); + + for (i = 0; i < ARRAY_SIZE(client); i++) { + struct i915_request *rq; + + rq = create_gpr_client(engine, global, + NUM_GPR * i * sizeof(u32)); + if (IS_ERR(rq)) + goto end_test; + + client[i] = rq; + } + + /* Continuously preempt the set of 3 running contexts */ + for (i = 1; i <= NUM_GPR; i++) { + err = preempt_user(engine, global, i); + if (err) + goto end_test; + } + + if (READ_ONCE(result[0]) != NUM_GPR) { + pr_err("%s: Failed to release semaphore\n", + engine->name); + err = -EIO; + goto end_test; + } + + for (i = 0; i < ARRAY_SIZE(client); i++) { + int gpr; + + if (i915_request_wait(client[i], 0, HZ / 2) < 0) { + err = -ETIME; + goto end_test; + } + + for (gpr = 1; gpr < NUM_GPR; gpr++) { + if (result[NUM_GPR * i + gpr] != 1) { + pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n", + engine->name, + i, gpr, result[NUM_GPR * i + gpr]); + err = -EINVAL; + goto end_test; + } + } + } + +end_test: + for (i = 0; i < ARRAY_SIZE(client); i++) { + if (!client[i]) + break; + + i915_request_put(client[i]); + } + + /* Flush the semaphores on error */ + smp_store_mb(result[0], -1); + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + } + + i915_vma_unpin_and_release(&global, 
I915_VMA_RELEASE_MAP); + return err; +} + static int live_preempt_timeout(void *arg) { struct intel_gt *gt = arg; @@ -3998,6 +4324,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_chain_preempt), SUBTEST(live_preempt_gang), SUBTEST(live_preempt_timeout), + SUBTEST(live_preempt_user), SUBTEST(live_preempt_smoke), SUBTEST(live_virtual_engine), SUBTEST(live_virtual_mask), From cb593e5d2b6d3ad489669914d9fd1c64c7a4a6af Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 08:28:05 +0100 Subject: [PATCH 071/121] drm/i915/gem: Hold obj->vma.lock over for_each_ggtt_vma() While the ggtt vma are protected by their object lifetime, the list continues until it hits a non-ggtt vma, and that vma is not protected and may be freed as we inspect it. Hence, we require the obj->vma.lock to protect the list as we iterate. An example of forgetting to hold the obj->vma.lock is [1642834.464973] general protection fault, probably for non-canonical address 0xdead000000000122: 0000 [#1] SMP PTI [1642834.464977] CPU: 3 PID: 1954 Comm: Xorg Not tainted 5.6.0-300.fc32.x86_64 #1 [1642834.464979] Hardware name: LENOVO 20ARS25701/20ARS25701, BIOS GJET94WW (2.44 ) 09/14/2017 [1642834.465021] RIP: 0010:i915_gem_object_set_tiling+0x2c0/0x3e0 [i915] [1642834.465024] Code: 8b 84 24 18 01 00 00 f6 c4 80 74 59 49 8b 94 24 a0 00 00 00 49 8b 84 24 e0 00 00 00 49 8b 74 24 10 48 8b 92 30 01 00 00 89 c7 <80> ba 0a 06 00 00 03 0f 87 86 00 00 00 ba 00 00 08 00 b9 00 00 10 [1642834.465025] RSP: 0018:ffffa98780c77d60 EFLAGS: 00010282 [1642834.465028] RAX: ffff8d232bfb2578 RBX: 0000000000000002 RCX: ffff8d25873a0000 [1642834.465029] RDX: dead000000000122 RSI: fffff0af8ac6e408 RDI: 000000002bfb2578 [1642834.465030] RBP: ffff8d25873a0000 R08: ffff8d252bfb5638 R09: 0000000000000000 [1642834.465031] R10: 0000000000000000 R11: ffff8d252bfb5640 R12: ffffa987801cb8f8 [1642834.465032] R13: 0000000000001000 R14: ffff8d233e972e50 R15: ffff8d233e972d00 [1642834.465034] FS: 00007f6a3d327f00(0000) GS:ffff8d25926c0000(0000) knlGS:0000000000000000 [1642834.465036] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [1642834.465037] CR2: 00007f6a2064d000 CR3: 00000002fb57c001 CR4: 00000000001606e0 [1642834.465038] Call Trace: [1642834.465083] i915_gem_set_tiling_ioctl+0x122/0x230 [i915] [1642834.465121] ? i915_gem_object_set_tiling+0x3e0/0x3e0 [i915] [1642834.465151] drm_ioctl_kernel+0x86/0xd0 [drm] [1642834.465156] ? avc_has_perm+0x3b/0x160 [1642834.465178] drm_ioctl+0x206/0x390 [drm] [1642834.465216] ? i915_gem_object_set_tiling+0x3e0/0x3e0 [i915] [1642834.465221] ? selinux_file_ioctl+0x122/0x1c0 [1642834.465226] ? 
__do_munmap+0x24b/0x4d0 [1642834.465231] ksys_ioctl+0x82/0xc0 [1642834.465235] __x64_sys_ioctl+0x16/0x20 [1642834.465238] do_syscall_64+0x5b/0xf0 [1642834.465243] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [1642834.465245] RIP: 0033:0x7f6a3d7b047b [1642834.465247] Code: 0f 1e fa 48 8b 05 1d aa 0c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d ed a9 0c 00 f7 d8 64 89 01 48 [1642834.465249] RSP: 002b:00007ffe71adba28 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [1642834.465251] RAX: ffffffffffffffda RBX: 000055f99048fa40 RCX: 00007f6a3d7b047b [1642834.465253] RDX: 00007ffe71adba30 RSI: 00000000c0106461 RDI: 000000000000000e [1642834.465254] RBP: 0000000000000002 R08: 000055f98f3f1798 R09: 0000000000000002 [1642834.465255] R10: 0000000000001000 R11: 0000000000000246 R12: 0000000000000080 [1642834.465257] R13: 000055f98f3f1690 R14: 00000000c0106461 R15: 00007ffe71adba30 Now to take the spinlock during the list iteration, we need to break it down into two phases. In the first phase under the lock, we cannot sleep and so must defer the actual work to a second list, protected by the ggtt->mutex. We also need to hold the spinlock during creation of a new vma to serialise with updates of the tiling on the object. Reported-by: Dave Airlie Fixes: 2850748ef876 ("drm/i915: Pull i915_vma_pin under the vm->mutex") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Dave Airlie Cc: # v5.5+ Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200422072805.17340-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 24 ++++++++++++++++++---- drivers/gpu/drm/i915/i915_vma.c | 10 +++++---- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 37f77aee1212..0158e49bf9bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode, unsigned int stride) { struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - struct i915_vma *vma; + struct i915_vma *vma, *vn; + LIST_HEAD(unbind); int ret = 0; if (tiling_mode == I915_TILING_NONE) return 0; mutex_lock(&ggtt->vm.mutex); + + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { + GEM_BUG_ON(vma->vm != &ggtt->vm); + if (i915_vma_fence_prepare(vma, tiling_mode, stride)) continue; - ret = __i915_vma_unbind(vma); - if (ret) - break; + list_move(&vma->vm_link, &unbind); } + spin_unlock(&obj->vma.lock); + + list_for_each_entry_safe(vma, vn, &unbind, vm_link) { + ret = __i915_vma_unbind(vma); + if (ret) { + /* Restore the remaining vma on an error */ + list_splice(&unbind, &ggtt->vm.bound_list); + break; + } + } + mutex_unlock(&ggtt->vm.mutex); return ret; @@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, } mutex_unlock(&obj->mm.lock); + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { vma->fence_size = i915_gem_fence_size(i915, vma->size, tiling, stride); @@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, if (vma->fence) vma->fence->dirty = true; } + spin_unlock(&obj->vma.lock); obj->tiling_and_stride = tiling | stride; i915_gem_object_unlock(obj); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index f0383a68c981..20fe5a134d92 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ 
b/drivers/gpu/drm/i915/i915_vma.c @@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj, GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); + spin_lock(&obj->vma.lock); + if (i915_is_ggtt(vm)) { if (unlikely(overflows_type(vma->size, u32))) - goto err_vma; + goto err_unlock; vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, i915_gem_object_get_tiling(obj), i915_gem_object_get_stride(obj)); if (unlikely(vma->fence_size < vma->size || /* overflow */ vma->fence_size > vm->total)) - goto err_vma; + goto err_unlock; GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); @@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj, __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); } - spin_lock(&obj->vma.lock); - rb = NULL; p = &obj->vma.tree.rb_node; while (*p) { @@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj, return vma; +err_unlock: + spin_unlock(&obj->vma.lock); err_vma: i915_vma_free(vma); return ERR_PTR(-E2BIG); From 15501287b1c1827d21cce14a868467a80060eccd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 15:17:49 +0100 Subject: [PATCH 072/121] drm/i915/execlists: Drop request-before-CS assertion When we migrated to execlists, one of the conditions we wanted to test for was whether the breadcrumb seqno was being written before the breadcrumb interrupt was delivered. This was following on from issues observed on previous generations which were not so strongly ordered. With the removal of the missed interrupt detection, we have no reliable means of detecting the out-of-order seqno/interrupt and instead tried to assert that the relationship between the CS event interrupt and the breadcrumb write should be strongly ordered. However, Icelake proves it is possible for the HW implementation to forget about minor little details such as write ordering and so the order between *processing* the CS event and the breadcrumb is unreliable. Remove the unreliable assertion, but leave a debug telltale in case we have reason to suspect a problem. Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1658 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200422141749.28709-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 33 ++++++----------------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index d42a9d6767d4..fba774a0abbf 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2384,13 +2384,6 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); } -static inline void flush_hwsp(const struct i915_request *rq) -{ - mb(); - clflush((void *)READ_ONCE(rq->hwsp_seqno)); - mb(); -} - static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -2498,7 +2491,11 @@ static void process_csb(struct intel_engine_cs *engine) * We rely on the hardware being strongly * ordered, that the breadcrumb write is * coherent (visible from the CPU) before the - * user interrupt and CSB is processed. + * user interrupt is processed. One might assume + * that the breadcrumb write being before the + * user interrupt and the CS event for the context + * switch would therefore be before the CS event + * itself... 
*/ if (GEM_SHOW_DEBUG() && !i915_request_completed(*execlists->active)) { @@ -2506,19 +2503,8 @@ static void process_csb(struct intel_engine_cs *engine) const u32 *regs __maybe_unused = rq->context->lrc_reg_state; - /* - * Flush the breadcrumb before crying foul. - * - * Since we have hit this on icl and seen the - * breadcrumb advance as we print out the debug - * info (so the problem corrected itself without - * lasting damage), and we know that icl suffers - * from missing global observation points in - * execlists, presume that affects even more - * coherency. - */ - flush_hwsp(rq); - + ENGINE_TRACE(engine, + "context completed before request!\n"); ENGINE_TRACE(engine, "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n", ENGINE_READ(engine, RING_START), @@ -2538,11 +2524,6 @@ static void process_csb(struct intel_engine_cs *engine) regs[CTX_RING_START], regs[CTX_RING_HEAD], regs[CTX_RING_TAIL]); - - /* Still? Declare it caput! */ - if (!i915_request_completed(rq) && - !reset_in_progress(execlists)) - GEM_BUG_ON("context completed before request"); } execlists_schedule_out(*execlists->active++); From 8372e3227f80a440afe780922968d2f3d0df3a8d Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 22 Apr 2020 15:34:40 +0300 Subject: [PATCH 073/121] drm/i915/icl: Fix timeout handling during TypeC AUX power well enabling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the check for when an AUX power well enabling timeout is expected on a legacy TypeC port. Fixes: 89e01caac641 ("drm/i915: Use single set of AUX powerwell ops for gen11+") Cc: Matt Roper Cc: José Roberto de Souza Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200422123440.19522-1-imre.deak@intel.com --- .../drm/i915/display/intel_display_power.c | 74 +++++++------------ 1 file changed, 25 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 721e9ba96d34..49998906cc61 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -20,8 +20,6 @@ #include "intel_tc.h" #include "intel_vga.h" -static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops; - bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -328,30 +326,9 @@ aux_ch_to_digital_port(struct drm_i915_private *dev_priv, return dig_port; } -static bool tc_phy_aux_timeout_expected(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - /* An AUX timeout is expected if the TBT DP tunnel is down. 
*/ - if (power_well->desc->hsw.is_tc_tbt) - return true; - - /* - * An AUX timeout is expected because we enable TC legacy port aux - * to hold port out of TC cold - */ - if (INTEL_GEN(dev_priv) == 11 && - power_well->desc->ops == &icl_tc_phy_aux_power_well_ops) { - enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); - struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); - - return dig_port->tc_legacy_port; - } - - return false; -} - static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) + struct i915_power_well *power_well, + bool timeout_expected) { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; @@ -362,8 +339,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", power_well->desc->name); - drm_WARN_ON(&dev_priv->drm, - !tc_phy_aux_timeout_expected(dev_priv, power_well)); + drm_WARN_ON(&dev_priv->drm, !timeout_expected); } } @@ -422,8 +398,8 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, SKL_FUSE_PG_DIST_STATUS(pg), 1)); } -static void hsw_power_well_enable_prepare(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) +static void hsw_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; @@ -448,14 +424,8 @@ static void hsw_power_well_enable_prepare(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, regs->driver); intel_de_write(dev_priv, regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); -} -static void hsw_power_well_enable_complete(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - int pw_idx = power_well->desc->hsw.idx; - - hsw_wait_for_power_well_enable(dev_priv, power_well); + hsw_wait_for_power_well_enable(dev_priv, power_well, false); /* Display WA #1178: cnl */ if (IS_CANNONLAKE(dev_priv) && @@ -481,13 +451,6 @@ static void hsw_power_well_enable_complete(struct drm_i915_private *dev_priv, power_well->desc->hsw.has_vga); } -static void hsw_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - hsw_power_well_enable_prepare(dev_priv, power_well); - hsw_power_well_enable_complete(dev_priv, power_well); -} - static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -527,7 +490,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, val | ICL_LANE_ENABLE_AUX); } - hsw_wait_for_power_well_enable(dev_priv, power_well); + hsw_wait_for_power_well_enable(dev_priv, power_well, false); /* Display WA #1178: icl */ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && @@ -633,24 +596,37 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, { enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + bool is_tbt = power_well->desc->hsw.is_tc_tbt; + bool timeout_expected; u32 val; icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); val &= ~DP_AUX_CH_CTL_TBT_IO; - if (power_well->desc->hsw.is_tc_tbt) + if (is_tbt) val |= DP_AUX_CH_CTL_TBT_IO; 
intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); - hsw_power_well_enable_prepare(dev_priv, power_well); + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)); - if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) + /* + * An AUX timeout is expected if the TBT DP tunnel is down, + * or need to enable AUX on a legacy TypeC port as part of the TC-cold + * exit sequence. + */ + timeout_expected = is_tbt; + if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) { icl_tc_cold_exit(dev_priv); + timeout_expected = true; + } - hsw_power_well_enable_complete(dev_priv, power_well); + hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); - if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) { + if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) { enum tc_port tc_port; tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); From 36fe164d8d780f6eb93f1ffa4ab5d3e9e3e81711 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Apr 2020 12:53:15 +0100 Subject: [PATCH 074/121] drm/i915/gt: Carefully order virtual_submission_tasklet During the virtual engine's submission tasklet, we take the request and insert into the submission queue on each of our siblings. This seems quite simply, and so no problems with ordering. However, the sibling execlists' submission tasklets may run concurrently with the virtual engine's tasklet, submitting the request to HW before the virtual finishes its task of telling all the siblings. If this happens, the sibling tasklet may *reorder* the ve->sibling[] array that the virtual engine tasklet is processing. This can *only* reorder within the elements already processed by the virtual engine, nevertheless the race is detected by KCSAN: [ 185.580014] BUG: KCSAN: data-race in execlists_dequeue [i915] / virtual_submission_tasklet [i915] [ 185.580054] [ 185.580076] write to 0xffff8881f1919860 of 8 bytes by interrupt on cpu 2: [ 185.580553] execlists_dequeue+0x6ad/0x1600 [i915] [ 185.581044] __execlists_submission_tasklet+0x48/0x60 [i915] [ 185.581517] execlists_submission_tasklet+0xd3/0x170 [i915] [ 185.581554] tasklet_action_common.isra.0+0x42/0x90 [ 185.581585] __do_softirq+0xc8/0x206 [ 185.581613] run_ksoftirqd+0x15/0x20 [ 185.581641] smpboot_thread_fn+0x15a/0x270 [ 185.581669] kthread+0x19a/0x1e0 [ 185.581695] ret_from_fork+0x1f/0x30 [ 185.581717] [ 185.581736] read to 0xffff8881f1919860 of 8 bytes by interrupt on cpu 0: [ 185.582231] virtual_submission_tasklet+0x10e/0x5c0 [i915] [ 185.582265] tasklet_action_common.isra.0+0x42/0x90 [ 185.582291] __do_softirq+0xc8/0x206 [ 185.582315] run_ksoftirqd+0x15/0x20 [ 185.582340] smpboot_thread_fn+0x15a/0x270 [ 185.582368] kthread+0x19a/0x1e0 [ 185.582395] ret_from_fork+0x1f/0x30 [ 185.582417] We can prevent this race by checking for the ve->request after looking up the sibling array. 
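To make the ordering easier to see, here is a stand-alone C11 sketch of that re-check pattern; the structure and function names are simplified stand-ins invented for the illustration, not the driver's real types:

#include <stdatomic.h>
#include <stdio.h>

/*
 * Simplified model of the fixed loop: re-read the sibling slot and the
 * pending-request pointer on every iteration, and stop as soon as a
 * sibling's tasklet has already consumed the request.
 */
struct sibling {
	const char *name;
};

struct virtual_engine {
	_Atomic(struct sibling *) siblings[2];
	unsigned int num_siblings;
	_Atomic(void *) request;
};

static void submission_tasklet(struct virtual_engine *ve)
{
	for (unsigned int n = 0; n < ve->num_siblings; n++) {
		struct sibling *sibling = atomic_load(&ve->siblings[n]);

		if (!atomic_load(&ve->request))
			break; /* already handled by a sibling's tasklet */

		printf("queue request on %s\n", sibling->name);
	}
}

int main(void)
{
	static struct sibling rcs = { "rcs0" }, vcs = { "vcs0" };
	struct virtual_engine ve = { .num_siblings = 2 };

	atomic_store(&ve.siblings[0], &rcs);
	atomic_store(&ve.siblings[1], &vcs);
	atomic_store(&ve.request, (void *)&ve); /* pretend a request is queued */

	submission_tasklet(&ve);
	return 0;
}

The only property being modelled is that the request pointer is sampled again after each sibling lookup, so a sibling tasklet that has already consumed the request stops the walk rather than letting it continue over a possibly reshuffled array.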
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200423115315.26825-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index fba774a0abbf..dead24aaf45d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -5092,12 +5092,15 @@ static void virtual_submission_tasklet(unsigned long data) return; local_irq_disable(); - for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) { - struct intel_engine_cs *sibling = ve->siblings[n]; + for (n = 0; n < ve->num_siblings; n++) { + struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]); struct ve_node * const node = &ve->nodes[sibling->id]; struct rb_node **parent, *rb; bool first; + if (!READ_ONCE(ve->request)) + break; /* already handled by a sibling's tasklet */ + if (unlikely(!(mask & sibling->mask))) { if (!RB_EMPTY_NODE(&node->rb)) { spin_lock(&sibling->active.lock); From b97f77baa845e779341150881bdc12971da18e52 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Apr 2020 09:59:39 +0100 Subject: [PATCH 075/121] drm/i915/gt: Check carefully for an idle engine in wait-for-idle intel_gt_wait_for_idle() tries to wait until all the outstanding requests are retired and the GPU is idle. As a side effect of retiring requests, we may submit more work to flush any pm barriers, and so the wait-for-idle tries to flush the background pm work and catch the new requests. However, if the work completed in the background before we were able to flush, it would queue the extra barrier request without us noticing -- and so we would return from wait-for-idle with one request remaining. (This breaks e.g. record_default_state where we need to wait until that barrier is retired, and it may slow suspend down by causing us to wait on the background retirement worker as opposed to immediately retiring the barrier.) However, since we track if there has been a submission since the engine pm barrier, we can very quickly detect if the idle barrier is still outstanding. 
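A minimal stand-alone sketch of that flush-then-recheck idea, with invented stand-in types in place of the driver's engine and timeline structures (illustration only):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the reworked flush_submission(): after flushing the
 * background workers we no longer trust their return values, we simply
 * look again at whether the kernel context still has requests queued
 * (e.g. an idle barrier submitted behind our back).
 */
struct engine {
	const char *name;
	int kernel_context_requests;
};

static void flush_background_work(struct engine *e)
{
	/* placeholder for flush_work()/flush_delayed_work() */
	(void)e;
}

static bool engine_active(const struct engine *e)
{
	return e->kernel_context_requests != 0;
}

static bool flush_submission(struct engine *engines, int count)
{
	bool active = false;

	for (int i = 0; i < count; i++) {
		flush_background_work(&engines[i]);
		/* Is the idle barrier still outstanding? */
		active |= engine_active(&engines[i]);
	}
	return active;
}

int main(void)
{
	struct engine engines[] = {
		{ "rcs0", 0 },
		{ "vcs0", 1 }, /* pretend an idle barrier slipped in */
	};

	printf("gt still active: %s\n",
	       flush_submission(engines, 2) ? "yes" : "no");
	return 0;
}

The return value no longer depends on what the flushes themselves reported; it only reflects whether the kernel context still has requests queued once the background work has run.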
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1763 Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200423085940.28168-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index dec96a731a77..16ff47c83bd5 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -26,6 +26,11 @@ static bool retire_requests(struct intel_timeline *tl) return !i915_active_fence_isset(&tl->last_request); } +static bool engine_active(const struct intel_engine_cs *engine) +{ + return !list_empty(&engine->kernel_context->timeline->requests); +} + static bool flush_submission(struct intel_gt *gt) { struct intel_engine_cs *engine; @@ -37,8 +42,13 @@ static bool flush_submission(struct intel_gt *gt) for_each_engine(engine, gt, id) { intel_engine_flush_submission(engine); - active |= flush_work(&engine->retire_work); - active |= flush_delayed_work(&engine->wakeref.work); + + /* Flush the background retirement and idle barriers */ + flush_work(&engine->retire_work); + flush_delayed_work(&engine->wakeref.work); + + /* Is the idle barrier still outstanding? */ + active |= engine_active(engine); } return active; @@ -173,7 +183,6 @@ out_active: spin_lock(&timelines->lock); if (atomic_dec_and_test(&tl->active_count)) list_del(&tl->link); - /* Defer the final release to after the spinlock */ if (refcount_dec_and_test(&tl->kref.refcount)) { GEM_BUG_ON(atomic_read(&tl->active_count)); From cbfd3a0c5a558bff441d39535ed5cafc862ce38a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 08:42:03 +0100 Subject: [PATCH 076/121] drm/i915/selftests: Add request throughput measurement to perf Under ideal circumstances, the driver should be able to keep the GPU fully saturated with work. Measure how close to ideal we get under the harshest of conditions with no user payload. v2: Also measure throughput using only one thread. 
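Both of the new reports convert a busy time and a wall time into a percentage with two decimal places using only integer division (div64_u64() in the kernel). A stand-alone sketch of that arithmetic, with sample values invented for the illustration:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustration only: mirrors the integer-division pattern used in the
 * selftest reporting to print busy time as a percentage with two
 * decimal places. The sample values are made up for the example.
 */
int main(void)
{
	uint64_t busy_ns = 123000000;	/* 123ms of engine busy time */
	uint64_t dt_ns = 456000000;	/* 456ms of wall time */
	uint64_t busy = 100 * busy_ns;
	int integer = 0, decimal = 0;

	if (dt_ns) {
		integer = busy / dt_ns;			/* whole percent */
		busy -= (uint64_t)integer * dt_ns;
		decimal = 100 * busy / dt_ns;		/* two decimal places */
	}

	printf("busy: %d.%02d%%\n", integer, decimal);	/* prints "busy: 26.97%" */
	return 0;
}

With the sample numbers above it prints 26.97%, matching 123ms of busy time over 456ms of wall time.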
Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200422074203.9799-1-chris@chris-wilson.co.uk --- .../drm/i915/selftests/i915_perf_selftests.h | 1 + drivers/gpu/drm/i915/selftests/i915_request.c | 580 +++++++++++++++++- 2 files changed, 580 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h index 3bf7f53e9924..d8da142985eb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h @@ -16,5 +16,6 @@ * Tests are executed in order by igt/i915_selftest */ selftest(engine_cs, intel_engine_cs_perf_selftests) +selftest(request, i915_request_perf_selftests) selftest(blt, i915_gem_object_blt_perf_selftests) selftest(region, intel_memory_region_perf_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 1dab0360f76a..3b319c0953cb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -23,6 +23,7 @@ */ #include +#include #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" @@ -1239,7 +1240,7 @@ static int live_parallel_engines(void *arg) struct igt_live_test t; unsigned int idx; - snprintf(name, sizeof(name), "%ps", fn); + snprintf(name, sizeof(name), "%ps", *fn); err = igt_live_test_begin(&t, i915, __func__, name); if (err) break; @@ -1476,3 +1477,580 @@ int i915_request_live_selftests(struct drm_i915_private *i915) return i915_subtests(tests, i915); } + +static int switch_to_kernel_sync(struct intel_context *ce, int err) +{ + struct i915_request *rq; + struct dma_fence *fence; + + rq = intel_engine_create_kernel_request(ce->engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + fence = i915_active_fence_get(&ce->timeline->last_request); + if (fence) { + i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + } + + rq = i915_request_get(rq); + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err) + err = -ETIME; + i915_request_put(rq); + + while (!err && !intel_engine_is_idle(ce->engine)) + intel_engine_flush_submission(ce->engine); + + return err; +} + +struct perf_stats { + struct intel_engine_cs *engine; + unsigned long count; + ktime_t time; + ktime_t busy; + u64 runtime; +}; + +struct perf_series { + struct drm_i915_private *i915; + unsigned int nengines; + struct intel_context *ce[]; +}; + +static int s_sync0(void *arg) +{ + struct perf_series *ps = arg; + IGT_TIMEOUT(end_time); + unsigned int idx = 0; + int err = 0; + + GEM_BUG_ON(!ps->nengines); + do { + struct i915_request *rq; + + rq = i915_request_create(ps->ce[idx]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + break; + + if (++idx == ps->nengines) + idx = 0; + } while (!__igt_timeout(end_time, NULL)); + + return err; +} + +static int s_sync1(void *arg) +{ + struct perf_series *ps = arg; + struct i915_request *prev = NULL; + IGT_TIMEOUT(end_time); + unsigned int idx = 0; + int err = 0; + + GEM_BUG_ON(!ps->nengines); + do { + struct i915_request *rq; + + rq = i915_request_create(ps->ce[idx]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (prev && i915_request_wait(prev, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(prev); + 
prev = rq; + if (err) + break; + + if (++idx == ps->nengines) + idx = 0; + } while (!__igt_timeout(end_time, NULL)); + i915_request_put(prev); + + return err; +} + +static int s_many(void *arg) +{ + struct perf_series *ps = arg; + IGT_TIMEOUT(end_time); + unsigned int idx = 0; + + GEM_BUG_ON(!ps->nengines); + do { + struct i915_request *rq; + + rq = i915_request_create(ps->ce[idx]); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_add(rq); + + if (++idx == ps->nengines) + idx = 0; + } while (!__igt_timeout(end_time, NULL)); + + return 0; +} + +static int perf_series_engines(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + s_sync0, + s_sync1, + s_many, + NULL, + }; + const unsigned int nengines = num_uabi_engines(i915); + struct intel_engine_cs *engine; + int (* const *fn)(void *arg); + struct pm_qos_request qos; + struct perf_stats *stats; + struct perf_series *ps; + unsigned int idx; + int err = 0; + + stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL); + if (!stats) + return -ENOMEM; + + ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL); + if (!ps) { + kfree(stats); + return -ENOMEM; + } + + cpu_latency_qos_add_request(&qos, 0); /* disable cstates */ + + ps->i915 = i915; + ps->nengines = nengines; + + idx = 0; + for_each_uabi_engine(engine, i915) { + struct intel_context *ce; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + goto out; + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + goto out; + } + + ps->ce[idx++] = ce; + } + GEM_BUG_ON(idx != ps->nengines); + + for (fn = func; *fn && !err; fn++) { + char name[KSYM_NAME_LEN]; + struct igt_live_test t; + + snprintf(name, sizeof(name), "%ps", *fn); + err = igt_live_test_begin(&t, i915, __func__, name); + if (err) + break; + + for (idx = 0; idx < nengines; idx++) { + struct perf_stats *p = + memset(&stats[idx], 0, sizeof(stats[idx])); + struct intel_context *ce = ps->ce[idx]; + + p->engine = ps->ce[idx]->engine; + intel_engine_pm_get(p->engine); + + if (intel_engine_supports_stats(p->engine) && + !intel_enable_engine_stats(p->engine)) + p->busy = intel_engine_get_busy_time(p->engine) + 1; + p->runtime = -intel_context_get_total_runtime_ns(ce); + p->time = ktime_get(); + } + + err = (*fn)(ps); + if (igt_live_test_end(&t)) + err = -EIO; + + for (idx = 0; idx < nengines; idx++) { + struct perf_stats *p = &stats[idx]; + struct intel_context *ce = ps->ce[idx]; + int integer, decimal; + u64 busy, dt; + + p->time = ktime_sub(ktime_get(), p->time); + if (p->busy) { + p->busy = ktime_sub(intel_engine_get_busy_time(p->engine), + p->busy - 1); + intel_disable_engine_stats(p->engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime += intel_context_get_total_runtime_ns(ce); + intel_engine_pm_put(p->engine); + + busy = 100 * ktime_to_ns(p->busy); + dt = ktime_to_ns(p->time); + if (dt) { + integer = div64_u64(busy, dt); + busy -= integer * dt; + decimal = div64_u64(100 * busy, dt); + } else { + integer = 0; + decimal = 0; + } + + pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n", + name, p->engine->name, ce->timeline->seqno, + integer, decimal, + div_u64(p->runtime, 1000 * 1000), + div_u64(ktime_to_ns(p->time), 1000 * 1000)); + } + } + +out: + for (idx = 0; idx < nengines; idx++) { + if (IS_ERR_OR_NULL(ps->ce[idx])) + break; + + intel_context_unpin(ps->ce[idx]); + intel_context_put(ps->ce[idx]); + } + kfree(ps); + + cpu_latency_qos_remove_request(&qos); + kfree(stats); + return err; +} + +static int p_sync0(void 
*arg) +{ + struct perf_stats *p = arg; + struct intel_engine_cs *engine = p->engine; + struct intel_context *ce; + IGT_TIMEOUT(end_time); + unsigned long count; + bool busy; + int err = 0; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + return err; + } + + busy = false; + if (intel_engine_supports_stats(engine) && + !intel_enable_engine_stats(engine)) { + p->busy = intel_engine_get_busy_time(engine); + busy = true; + } + + p->time = ktime_get(); + count = 0; + do { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + err = 0; + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + break; + + count++; + } while (!__igt_timeout(end_time, NULL)); + p->time = ktime_sub(ktime_get(), p->time); + + if (busy) { + p->busy = ktime_sub(intel_engine_get_busy_time(engine), + p->busy); + intel_disable_engine_stats(engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime = intel_context_get_total_runtime_ns(ce); + p->count = count; + + intel_context_unpin(ce); + intel_context_put(ce); + return err; +} + +static int p_sync1(void *arg) +{ + struct perf_stats *p = arg; + struct intel_engine_cs *engine = p->engine; + struct i915_request *prev = NULL; + struct intel_context *ce; + IGT_TIMEOUT(end_time); + unsigned long count; + bool busy; + int err = 0; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + return err; + } + + busy = false; + if (intel_engine_supports_stats(engine) && + !intel_enable_engine_stats(engine)) { + p->busy = intel_engine_get_busy_time(engine); + busy = true; + } + + p->time = ktime_get(); + count = 0; + do { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + err = 0; + if (prev && i915_request_wait(prev, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(prev); + prev = rq; + if (err) + break; + + count++; + } while (!__igt_timeout(end_time, NULL)); + i915_request_put(prev); + p->time = ktime_sub(ktime_get(), p->time); + + if (busy) { + p->busy = ktime_sub(intel_engine_get_busy_time(engine), + p->busy); + intel_disable_engine_stats(engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime = intel_context_get_total_runtime_ns(ce); + p->count = count; + + intel_context_unpin(ce); + intel_context_put(ce); + return err; +} + +static int p_many(void *arg) +{ + struct perf_stats *p = arg; + struct intel_engine_cs *engine = p->engine; + struct intel_context *ce; + IGT_TIMEOUT(end_time); + unsigned long count; + int err = 0; + bool busy; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + return err; + } + + busy = false; + if (intel_engine_supports_stats(engine) && + !intel_enable_engine_stats(engine)) { + p->busy = intel_engine_get_busy_time(engine); + busy = true; + } + + count = 0; + p->time = ktime_get(); + do { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + count++; + } while (!__igt_timeout(end_time, NULL)); + p->time = ktime_sub(ktime_get(), p->time); + + if (busy) { + p->busy = 
ktime_sub(intel_engine_get_busy_time(engine), + p->busy); + intel_disable_engine_stats(engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime = intel_context_get_total_runtime_ns(ce); + p->count = count; + + intel_context_unpin(ce); + intel_context_put(ce); + return err; +} + +static int perf_parallel_engines(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + p_sync0, + p_sync1, + p_many, + NULL, + }; + const unsigned int nengines = num_uabi_engines(i915); + struct intel_engine_cs *engine; + int (* const *fn)(void *arg); + struct pm_qos_request qos; + struct { + struct perf_stats p; + struct task_struct *tsk; + } *engines; + int err = 0; + + engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL); + if (!engines) + return -ENOMEM; + + cpu_latency_qos_add_request(&qos, 0); + + for (fn = func; *fn; fn++) { + char name[KSYM_NAME_LEN]; + struct igt_live_test t; + unsigned int idx; + + snprintf(name, sizeof(name), "%ps", *fn); + err = igt_live_test_begin(&t, i915, __func__, name); + if (err) + break; + + atomic_set(&i915->selftest.counter, nengines); + + idx = 0; + for_each_uabi_engine(engine, i915) { + intel_engine_pm_get(engine); + + memset(&engines[idx].p, 0, sizeof(engines[idx].p)); + engines[idx].p.engine = engine; + + engines[idx].tsk = kthread_run(*fn, &engines[idx].p, + "igt:%s", engine->name); + if (IS_ERR(engines[idx].tsk)) { + err = PTR_ERR(engines[idx].tsk); + intel_engine_pm_put(engine); + break; + } + get_task_struct(engines[idx++].tsk); + } + + yield(); /* start all threads before we kthread_stop() */ + + idx = 0; + for_each_uabi_engine(engine, i915) { + int status; + + if (IS_ERR(engines[idx].tsk)) + break; + + status = kthread_stop(engines[idx].tsk); + if (status && !err) + err = status; + + intel_engine_pm_put(engine); + put_task_struct(engines[idx++].tsk); + } + + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + + idx = 0; + for_each_uabi_engine(engine, i915) { + struct perf_stats *p = &engines[idx].p; + u64 busy = 100 * ktime_to_ns(p->busy); + u64 dt = ktime_to_ns(p->time); + int integer, decimal; + + if (dt) { + integer = div64_u64(busy, dt); + busy -= integer * dt; + decimal = div64_u64(100 * busy, dt); + } else { + integer = 0; + decimal = 0; + } + + GEM_BUG_ON(engine != p->engine); + pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n", + name, engine->name, p->count, integer, decimal, + div_u64(p->runtime, 1000 * 1000), + div_u64(ktime_to_ns(p->time), 1000 * 1000)); + idx++; + } + } + + cpu_latency_qos_remove_request(&qos); + kfree(engines); + return err; +} + +int i915_request_perf_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(perf_series_engines), + SUBTEST(perf_parallel_engines), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return i915_subtests(tests, i915); +} From f1cc6acf22dd7b2a3548fa5dd9537b90a44668b5 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Thu, 23 Apr 2020 23:41:59 +0100 Subject: [PATCH 077/121] drm/i915/selftests: Add context batchbuffers registers to live_lrc_fixed Add per ctx bb and indirect ctx bb register locations to live_lrc_fixed for verification. 
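The helpers added below derive the three locations from one another: the indirect-ctx pointer lives two dwords after the per-ctx batchbuffer pointer, and the indirect-ctx offset two dwords after that. A stand-alone sketch of that chain, with the gen number and render-class flag hard-coded purely for illustration:

#include <stdio.h>

/*
 * Sketch of the offset chain: return -1 when the engine has no per-ctx
 * batchbuffer pointer, otherwise step two dwords at a time to the
 * indirect-ctx pointer and its offset field.
 */
static int wa_bb_per_ctx(int gen, int is_render)
{
	if (gen >= 12)
		return 0x12;
	else if (gen >= 9 || is_render)
		return 0x18;
	return -1;
}

int main(void)
{
	int gen = 12, render = 1;
	int bb = wa_bb_per_ctx(gen, render);
	int indirect_ptr = bb < 0 ? bb : bb + 2;
	int indirect_off = indirect_ptr < 0 ? indirect_ptr : indirect_ptr + 2;

	printf("gen%d: per-ctx bb %#x, indirect ptr %#x, indirect offset %#x\n",
	       gen, bb, indirect_ptr, indirect_off);
	return 0;
}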
Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200423224159.22078-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 128 ++++++++++++++---------- drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 6 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 15 +++ 3 files changed, 89 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index dead24aaf45d..090be5981b55 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -238,6 +238,70 @@ __execlists_update_reg_state(const struct intel_context *ce, const struct intel_engine_cs *engine, u32 head); +static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) +{ + if (INTEL_GEN(engine->i915) >= 12) + return 0x60; + else if (INTEL_GEN(engine->i915) >= 9) + return 0x54; + else if (engine->class == RENDER_CLASS) + return 0x58; + else + return -1; +} + +static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine) +{ + if (INTEL_GEN(engine->i915) >= 12) + return 0x12; + else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS) + return 0x18; + else + return -1; +} + +static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine) +{ + int x; + + x = lrc_ring_wa_bb_per_ctx(engine); + if (x < 0) + return x; + + return x + 2; +} + +static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) +{ + int x; + + x = lrc_ring_indirect_ptr(engine); + if (x < 0) + return x; + + return x + 2; +} + +static u32 +lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + default: + MISSING_CASE(INTEL_GEN(engine->i915)); + fallthrough; + case 12: + return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 11: + return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 10: + return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 9: + return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 8: + return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + } +} + static u32 intel_context_get_runtime(const struct intel_context *ce) { /* @@ -1102,18 +1166,6 @@ static void intel_engine_context_out(struct intel_engine_cs *engine) write_sequnlock_irqrestore(&engine->stats.lock, flags); } -static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) -{ - if (INTEL_GEN(engine->i915) >= 12) - return 0x60; - else if (INTEL_GEN(engine->i915) >= 9) - return 0x54; - else if (engine->class == RENDER_CLASS) - return 0x58; - else - return -1; -} - static void execlists_check_context(const struct intel_context *ce, const struct intel_engine_cs *engine) @@ -4673,39 +4725,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) return 0; } -static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine) -{ - u32 indirect_ctx_offset; - - switch (INTEL_GEN(engine->i915)) { - default: - MISSING_CASE(INTEL_GEN(engine->i915)); - /* fall through */ - case 12: - indirect_ctx_offset = - GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - break; - case 11: - indirect_ctx_offset = - GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - break; - case 10: - indirect_ctx_offset = - GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - break; - case 9: - indirect_ctx_offset = - GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - break; - case 8: - indirect_ctx_offset = - GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - break; - } - - return indirect_ctx_offset; -} - static void init_common_reg_state(u32 * const regs, 
const struct intel_engine_cs *engine, @@ -4728,27 +4747,29 @@ static void init_common_reg_state(u32 * const regs, } static void init_wa_bb_reg_state(u32 * const regs, - const struct intel_engine_cs *engine, - u32 pos_bb_per_ctx) + const struct intel_engine_cs *engine) { const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; if (wa_ctx->per_ctx.size) { const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - regs[pos_bb_per_ctx] = + GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1); + regs[lrc_ring_wa_bb_per_ctx(engine) + 1] = (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; } if (wa_ctx->indirect_ctx.size) { const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - regs[pos_bb_per_ctx + 2] = + GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1); + regs[lrc_ring_indirect_ptr(engine) + 1] = (ggtt_offset + wa_ctx->indirect_ctx.offset) | (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); - regs[pos_bb_per_ctx + 4] = - intel_lr_indirect_ctx_offset(engine) << 6; + GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1); + regs[lrc_ring_indirect_offset(engine) + 1] = + lrc_ring_indirect_offset_default(engine) << 6; } } @@ -4797,10 +4818,7 @@ static void execlists_init_reg_state(u32 *regs, init_common_reg_state(regs, engine, ring, inhibit); init_ppgtt_reg_state(regs, vm_alias(ce->vm)); - init_wa_bb_reg_state(regs, engine, - INTEL_GEN(engine->i915) >= 12 ? - GEN12_CTX_BB_PER_CTX_PTR : - CTX_BB_PER_CTX_PTR); + init_wa_bb_reg_state(regs, engine); __reset_stop_ring(regs, engine); } diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index d39b72590e40..93cb6c460508 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -9,14 +9,13 @@ #include -/* GEN8 to GEN11 Reg State Context */ +/* GEN8 to GEN12 Reg State Context */ #define CTX_CONTEXT_CONTROL (0x02 + 1) #define CTX_RING_HEAD (0x04 + 1) #define CTX_RING_TAIL (0x06 + 1) #define CTX_RING_START (0x08 + 1) #define CTX_RING_CTL (0x0a + 1) #define CTX_BB_STATE (0x10 + 1) -#define CTX_BB_PER_CTX_PTR (0x18 + 1) #define CTX_TIMESTAMP (0x22 + 1) #define CTX_PDP3_UDW (0x24 + 1) #define CTX_PDP3_LDW (0x26 + 1) @@ -30,9 +29,6 @@ #define GEN9_CTX_RING_MI_MODE 0x54 -/* GEN12+ Reg State Context */ -#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1) - #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ u32 *reg_state__ = (reg_state); \ const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \ diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index fc3f9a248764..ae0a0a692498 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -4593,6 +4593,21 @@ static int live_lrc_fixed(void *arg) CTX_BB_STATE - 1, "BB_STATE" }, + { + i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)), + lrc_ring_wa_bb_per_ctx(engine), + "RING_BB_PER_CTX_PTR" + }, + { + i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)), + lrc_ring_indirect_ptr(engine), + "RING_INDIRECT_CTX_PTR" + }, + { + i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)), + lrc_ring_indirect_offset(engine), + "RING_INDIRECT_CTX_OFFSET" + }, { i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)), CTX_TIMESTAMP - 1, From b4892e44043235368dfc714f4a6530f6793802b0 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Thu, 23 Apr 2020 21:23:52 +0300 Subject: [PATCH 078/121] drm/i915: Make define for lrc state offset More often than not, we need a byte offset into lrc register state from the start of the hw state. Make it so. 
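Since the register state starts one page into the context image (right after the per-process HWSP), the byte offset is simply that page number scaled by the page size. A tiny stand-alone illustration, with PAGE_SIZE and LRC_PPHWSP_PN values assumed here for the example:

#include <stdio.h>

/*
 * Illustration only: mirrors the relationship between the existing page
 * defines and the new byte-offset define. PAGE_SIZE and LRC_PPHWSP_PN
 * values are assumed for the example.
 */
#define PAGE_SIZE        4096
#define LRC_PPHWSP_PN    0	/* per-process HWSP page */
#define LRC_PPHWSP_SZ    1
#define LRC_STATE_PN     (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)

int main(void)
{
	static unsigned char context_image[2 * PAGE_SIZE]; /* stand-in for the pinned state */
	void *reg_state = context_image + LRC_STATE_OFFSET;

	printf("lrc reg state at byte offset %d (page %d)\n",
	       LRC_STATE_OFFSET, LRC_STATE_PN);
	(void)reg_state;
	return 0;
}

Call sites can then use vaddr + LRC_STATE_OFFSET instead of spelling out LRC_STATE_PN * PAGE_SIZE each time.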
Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200423182355.21837-3-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_context_sseu.c | 3 +-- drivers/gpu/drm/i915/gt/intel_lrc.c | 8 ++++---- drivers/gpu/drm/i915/gt/intel_lrc.h | 1 + drivers/gpu/drm/i915/gt/selftest_lrc.c | 14 +++++++------- drivers/gpu/drm/i915/i915_perf.c | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c index 57a30956c922..487299cb91f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c @@ -25,8 +25,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq, return PTR_ERR(cs); offset = i915_ggtt_offset(ce->state) + - LRC_STATE_PN * PAGE_SIZE + - CTX_R_PWR_CLK_STATE * 4; + LRC_STATE_OFFSET + CTX_R_PWR_CLK_STATE * 4; *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = lower_32_bits(offset); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 090be5981b55..214ea2a34693 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1213,7 +1213,7 @@ static void restore_default_state(struct intel_context *ce, if (engine->pinned_default_state) memcpy(regs, /* skip restoring the vanilla PPHWSP */ - engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, + engine->pinned_default_state + LRC_STATE_OFFSET, engine->context_size - PAGE_SIZE); execlists_init_reg_state(regs, ce, engine, ce->ring, false); @@ -3169,7 +3169,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine) static void execlists_context_unpin(struct intel_context *ce) { - check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE, + check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET, ce->engine); i915_gem_object_unpin_map(ce->state->obj); @@ -3216,7 +3216,7 @@ __execlists_context_pin(struct intel_context *ce, return PTR_ERR(vaddr); ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; - ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; + ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET; __execlists_update_reg_state(ce, engine, ce->ring->tail); return 0; @@ -4866,7 +4866,7 @@ populate_lr_context(struct intel_context *ce, * The second page of the context object contains some registers which * must be set up prior to the first execution. 
*/ - execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE, + execlists_init_reg_state(vaddr + LRC_STATE_OFFSET, ce, engine, ring, inhibit); ret = 0; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index dfbc214e14f5..91fd8e452d9b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -90,6 +90,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine); #define LRC_PPHWSP_SZ (1) /* After the PPHWSP we have the logical state for the context */ #define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) +#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE) /* Space within PPHWSP reserved to be used as scratch */ #define LRC_PPHWSP_SCRATCH 0x34 diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index ae0a0a692498..e964c1402d29 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -4458,7 +4458,7 @@ static int live_lrc_layout(void *arg) err = PTR_ERR(hw); break; } - hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + hw += LRC_STATE_OFFSET / sizeof(*hw); execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE), engine->kernel_context, @@ -4626,7 +4626,7 @@ static int live_lrc_fixed(void *arg) err = PTR_ERR(hw); break; } - hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + hw += LRC_STATE_OFFSET / sizeof(*hw); for (t = tbl; t->name; t++) { int dw = find_offset(hw, t->reg); @@ -5212,7 +5212,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch) x = 0; dw = 0; hw = ce->engine->pinned_default_state; - hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + hw += LRC_STATE_OFFSET / sizeof(*hw); do { u32 len = hw[dw] & 0x7f; @@ -5365,7 +5365,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison) dw = 0; hw = ce->engine->pinned_default_state; - hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + hw += LRC_STATE_OFFSET / sizeof(*hw); do { u32 len = hw[dw] & 0x7f; @@ -5489,12 +5489,12 @@ static int compare_isolation(struct intel_engine_cs *engine, err = PTR_ERR(lrc); goto err_B1; } - lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + lrc += LRC_STATE_OFFSET / sizeof(*hw); x = 0; dw = 0; hw = engine->pinned_default_state; - hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + hw += LRC_STATE_OFFSET / sizeof(*hw); do { u32 len = hw[dw] & 0x7f; @@ -5736,7 +5736,7 @@ static struct i915_request *garbage(struct intel_context *ce, prandom_bytes_state(prng, ce->lrc_reg_state, ce->engine->context_size - - LRC_STATE_PN * PAGE_SIZE); + LRC_STATE_OFFSET); rq = intel_context_create_request(ce); if (IS_ERR(rq)) { diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 5cde3e4e7be6..dec1b33e4da8 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2098,7 +2098,7 @@ gen8_store_flex(struct i915_request *rq, if (IS_ERR(cs)) return PTR_ERR(cs); - offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; + offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET; do { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = offset + flex->offset * sizeof(u32); From 50689771c8f073e97f7758e5b696c64f3044bbd8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 20:05:58 +0100 Subject: [PATCH 079/121] drm/i915: Only close vma we open The history of i915_vma_close() is confusing, as is its use. 
As the lifetime of the i915_vma is currently bounded by the object it is attached to, we needed a means of identify when a vma was no longer in use by userspace (via the user's fd). This is further complicated by that only ppgtt vma should be closed at the user's behest, as the ggtt were always shared. Now that we attach the vma to a lut on the user's context, the open count does indicate how many unique and open context/vm are referencing this vma from the user. As such, we can and should just use the open_count to track when the vma is still in use by userspace. It's a poor man's replacement for reference counting. Closes: https://gitlab.freedesktop.org/drm/intel/issues/1193 Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200422190558.30509-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 4 +- .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 4 +- .../gpu/drm/i915/gem/selftests/huge_pages.c | 83 +++++-------------- .../drm/i915/gem/selftests/i915_gem_context.c | 1 - drivers/gpu/drm/i915/gt/intel_renderstate.c | 4 +- drivers/gpu/drm/i915/gvt/scheduler.c | 5 +- drivers/gpu/drm/i915/i915_vma.c | 28 +++++-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 32 +++---- 9 files changed, 56 insertions(+), 107 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 11d9135cf21a..900ea8b7fc8f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -130,9 +130,7 @@ static void lut_close(struct i915_gem_context *ctx) if (&lut->obj_link != &obj->lut_list) { i915_lut_handle_free(lut); radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); - if (atomic_dec_and_test(&vma->open_count) && - !i915_vma_is_ggtt(vma)) - i915_vma_close(vma); + i915_vma_close(vma); i915_gem_object_put(obj); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 517898aa634c..964f73f062c1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -830,7 +830,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb, return 0; err: - atomic_dec(&vma->open_count); + i915_vma_close(vma); i915_vma_put(vma); i915_lut_handle_free(lut); return err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 3f01cdd1a39b..9d1d0131f7c2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -135,9 +135,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) if (vma) { GEM_BUG_ON(vma->obj != obj); GEM_BUG_ON(!atomic_read(&vma->open_count)); - if (atomic_dec_and_test(&vma->open_count) && - !i915_vma_is_ggtt(vma)) - i915_vma_close(vma); + i915_vma_close(vma); } mutex_unlock(&ctx->mutex); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index d4f94ca9ae0d..c9988b6d5c88 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -421,7 +421,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) - goto out_close; + goto out_put; err = igt_check_page_sizes(vma); @@ -432,8 +432,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) } i915_vma_unpin(vma); - 
i915_vma_close(vma); - i915_gem_object_put(obj); if (err) @@ -443,8 +441,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) goto out_device; -out_close: - i915_vma_close(vma); out_put: i915_gem_object_put(obj); out_device: @@ -492,7 +488,7 @@ static int igt_mock_memory_region_huge_pages(void *arg) err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) - goto out_close; + goto out_put; err = igt_check_page_sizes(vma); if (err) @@ -515,8 +511,6 @@ static int igt_mock_memory_region_huge_pages(void *arg) } i915_vma_unpin(vma); - i915_vma_close(vma); - __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); } @@ -526,8 +520,6 @@ static int igt_mock_memory_region_huge_pages(void *arg) out_unpin: i915_vma_unpin(vma); -out_close: - i915_vma_close(vma); out_put: i915_gem_object_put(obj); out_region: @@ -587,10 +579,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) } err = i915_vma_pin(vma, 0, 0, flags); - if (err) { - i915_vma_close(vma); + if (err) goto out_unpin; - } err = igt_check_page_sizes(vma); @@ -603,10 +593,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) i915_vma_unpin(vma); - if (err) { - i915_vma_close(vma); + if (err) goto out_unpin; - } /* * Try all the other valid offsets until the next @@ -615,16 +603,12 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) */ for (offset = 4096; offset < page_size; offset += 4096) { err = i915_vma_unbind(vma); - if (err) { - i915_vma_close(vma); + if (err) goto out_unpin; - } err = i915_vma_pin(vma, 0, 0, flags | offset); - if (err) { - i915_vma_close(vma); + if (err) goto out_unpin; - } err = igt_check_page_sizes(vma); @@ -636,10 +620,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) i915_vma_unpin(vma); - if (err) { - i915_vma_close(vma); + if (err) goto out_unpin; - } if (igt_timeout(end_time, "%s timed out at offset %x with page-size %x\n", @@ -647,8 +629,6 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) break; } - i915_vma_close(vma); - i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); @@ -670,12 +650,6 @@ static void close_object_list(struct list_head *objects, struct drm_i915_gem_object *obj, *on; list_for_each_entry_safe(obj, on, objects, st_link) { - struct i915_vma *vma; - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (!IS_ERR(vma)) - i915_vma_close(vma); - list_del(&obj->st_link); i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); @@ -912,7 +886,7 @@ static int igt_mock_ppgtt_64K(void *arg) err = i915_vma_pin(vma, 0, 0, flags); if (err) - goto out_vma_close; + goto out_object_unpin; err = igt_check_page_sizes(vma); if (err) @@ -945,8 +919,6 @@ static int igt_mock_ppgtt_64K(void *arg) } i915_vma_unpin(vma); - i915_vma_close(vma); - i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); @@ -957,8 +929,6 @@ static int igt_mock_ppgtt_64K(void *arg) out_vma_unpin: i915_vma_unpin(vma); -out_vma_close: - i915_vma_close(vma); out_object_unpin: i915_gem_object_unpin_pages(obj); out_object_put: @@ -1070,7 +1040,7 @@ static int __igt_write_huge(struct intel_context *ce, err = i915_vma_unbind(vma); if (err) - goto out_vma_close; + return err; err = i915_vma_pin(vma, size, 0, flags | offset); if (err) { @@ -1081,7 +1051,7 @@ static int __igt_write_huge(struct intel_context *ce, if (err == -ENOSPC && i915_is_ggtt(ce->vm)) err = 0; - goto out_vma_close; + return err; } err = igt_check_page_sizes(vma); @@ -1102,8 +1072,6 @@ static int __igt_write_huge(struct intel_context *ce, out_vma_unpin: 
i915_vma_unpin(vma); -out_vma_close: - __i915_vma_put(vma); return err; } @@ -1490,7 +1458,7 @@ static int igt_ppgtt_pin_update(void *arg) err = i915_vma_pin(vma, SZ_2M, 0, flags); if (err) - goto out_close; + goto out_put; if (vma->page_sizes.sg < page_size) { pr_info("Unable to allocate page-size %x, finishing test early\n", @@ -1527,8 +1495,6 @@ static int igt_ppgtt_pin_update(void *arg) goto out_unpin; i915_vma_unpin(vma); - i915_vma_close(vma); - i915_gem_object_put(obj); } @@ -1546,7 +1512,7 @@ static int igt_ppgtt_pin_update(void *arg) err = i915_vma_pin(vma, 0, 0, flags); if (err) - goto out_close; + goto out_put; /* * Make sure we don't end up with something like where the pde is still @@ -1576,8 +1542,6 @@ static int igt_ppgtt_pin_update(void *arg) out_unpin: i915_vma_unpin(vma); -out_close: - i915_vma_close(vma); out_put: i915_gem_object_put(obj); out_vm: @@ -1629,13 +1593,11 @@ static int igt_tmpfs_fallback(void *arg) err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) - goto out_close; + goto out_put; err = igt_check_page_sizes(vma); i915_vma_unpin(vma); -out_close: - i915_vma_close(vma); out_put: i915_gem_object_put(obj); out_restore: @@ -1682,7 +1644,7 @@ static int igt_shrink_thp(void *arg) err = i915_vma_pin(vma, 0, 0, flags); if (err) - goto out_close; + goto out_put; if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { pr_info("failed to allocate THP, finishing test early\n"); @@ -1706,7 +1668,7 @@ static int igt_shrink_thp(void *arg) i915_gem_context_unlock_engines(ctx); i915_vma_unpin(vma); if (err) - goto out_close; + goto out_put; /* * Now that the pages are *unpinned* shrink-all should invoke @@ -1716,18 +1678,18 @@ static int igt_shrink_thp(void *arg) if (i915_gem_object_has_pages(obj)) { pr_err("shrink-all didn't truncate the pages\n"); err = -EINVAL; - goto out_close; + goto out_put; } if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) { pr_err("residual page-size bits left\n"); err = -EINVAL; - goto out_close; + goto out_put; } err = i915_vma_pin(vma, 0, 0, flags); if (err) - goto out_close; + goto out_put; while (n--) { err = cpu_check(obj, n, 0xdeadbeaf); @@ -1737,8 +1699,6 @@ static int igt_shrink_thp(void *arg) out_unpin: i915_vma_unpin(vma); -out_close: - i915_vma_close(vma); out_put: i915_gem_object_put(obj); out_vm: @@ -1777,21 +1737,20 @@ int i915_gem_huge_page_mock_selftests(void) if (!i915_vm_is_4lvl(&ppgtt->vm)) { pr_err("failed to create 48b PPGTT\n"); err = -EINVAL; - goto out_close; + goto out_put; } /* If we were ever hit this then it's time to mock the 64K scratch */ if (!i915_vm_has_scratch_64K(&ppgtt->vm)) { pr_err("PPGTT missing 64K scratch page\n"); err = -EINVAL; - goto out_close; + goto out_put; } err = i915_subtests(tests, ppgtt); -out_close: +out_put: i915_vm_put(&ppgtt->vm); - out_unlock: drm_dev_put(&dev_priv->drm); return err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index f4f933240b39..87d264fe54b2 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1687,7 +1687,6 @@ static int read_from_scratch(struct i915_gem_context *ctx, goto skip_request; i915_vma_unpin(vma); - i915_vma_close(vma); i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 26e78db33675..708cb7808865 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -194,7 +194,7 @@ int 
intel_renderstate_init(struct intel_renderstate *so, err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) - goto err_vma; + goto err_obj; err = render_state_setup(so, engine->i915); if (err) @@ -204,8 +204,6 @@ int intel_renderstate_init(struct intel_renderstate *so, err_unpin: i915_vma_unpin(so->vma); -err_vma: - i915_vma_close(so->vma); err_obj: i915_gem_object_put(obj); so->vma = NULL; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index cb11c3184085..2f5c59111821 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -595,10 +595,9 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) if (bb->va && !IS_ERR(bb->va)) i915_gem_object_unpin_map(bb->obj); - if (bb->vma && !IS_ERR(bb->vma)) { + if (bb->vma && !IS_ERR(bb->vma)) i915_vma_unpin(bb->vma); - i915_vma_close(bb->vma); - } + i915_gem_object_put(bb->obj); } list_del(&bb->list); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 20fe5a134d92..fc14ebf9a0b7 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -522,7 +522,6 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) GEM_BUG_ON(!obj); i915_vma_unpin(vma); - i915_vma_close(vma); if (flags & I915_VMA_RELEASE_MAP) i915_gem_object_unpin_map(obj); @@ -1023,13 +1022,8 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) } while (1); } -void i915_vma_close(struct i915_vma *vma) +static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) { - struct intel_gt *gt = vma->vm->gt; - unsigned long flags; - - GEM_BUG_ON(i915_vma_is_closed(vma)); - /* * We defer actually closing, unbinding and destroying the VMA until * the next idle point, or if the object is freed in the meantime. By @@ -1042,9 +1036,25 @@ void i915_vma_close(struct i915_vma *vma) * causing us to rebind the VMA once more. This ends up being a lot * of wasted work for the steady state. 
*/ - spin_lock_irqsave(&gt->closed_lock, flags); + GEM_BUG_ON(i915_vma_is_closed(vma)); list_add(&vma->closed_link, &gt->closed_vma); - spin_unlock_irqrestore(&gt->closed_lock, flags); +} + +void i915_vma_close(struct i915_vma *vma) +{ + struct intel_gt *gt = vma->vm->gt; + unsigned long flags; + + if (i915_vma_is_ggtt(vma)) + return; + + GEM_BUG_ON(!atomic_read(&vma->open_count)); + if (atomic_dec_and_lock_irqsave(&vma->open_count, + &gt->closed_lock, + flags)) { + __vma_close(vma, gt); + spin_unlock_irqrestore(&gt->closed_lock, flags); + } } static void __i915_vma_remove_closed(struct i915_vma *vma) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 5d2a02fcf595..2e471500a646 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -331,9 +331,6 @@ static void close_object_list(struct list_head *objects, vma = i915_vma_instance(obj, vm, NULL); if (!IS_ERR(vma)) ignored = i915_vma_unbind(vma); - /* Only ppgtt vma may be closed before the object is freed */ - if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma)) - i915_vma_close(vma); list_del(&obj->st_link); i915_gem_object_put(obj); @@ -591,7 +588,7 @@ static int walk_hole(struct i915_address_space *vm, pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n", __func__, addr, vma->size, hole_start, hole_end, err); - goto err_close; + goto err_put; } i915_vma_unpin(vma); @@ -600,14 +597,14 @@ static int walk_hole(struct i915_address_space *vm, pr_err("%s incorrect at %llx + %llx\n", __func__, addr, vma->size); err = -EINVAL; - goto err_close; + goto err_put; } err = i915_vma_unbind(vma); if (err) { pr_err("%s unbind failed at %llx + %llx with err=%d\n", __func__, addr, vma->size, err); - goto err_close; + goto err_put; } GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); @@ -616,13 +613,10 @@ static int walk_hole(struct i915_address_space *vm, "%s timed out at %llx\n", __func__, addr)) { err = -EINTR; - goto err_close; + goto err_put; } } -err_close: - if (!i915_vma_is_ggtt(vma)) - i915_vma_close(vma); err_put: i915_gem_object_put(obj); if (err) @@ -675,7 +669,7 @@ static int pot_hole(struct i915_address_space *vm, addr, hole_start, hole_end, err); - goto err; + goto err_obj; } if (!drm_mm_node_allocated(&vma->node) || @@ -685,7 +679,7 @@ static int pot_hole(struct i915_address_space *vm, i915_vma_unpin(vma); err = i915_vma_unbind(vma); err = -EINVAL; - goto err; + goto err_obj; } i915_vma_unpin(vma); @@ -697,13 +691,10 @@ static int pot_hole(struct i915_address_space *vm, "%s timed out after %d/%d\n", __func__, pot, fls64(hole_end - 1) - 1)) { err = -EINTR; - goto err; + goto err_obj; } } -err: - if (!i915_vma_is_ggtt(vma)) - i915_vma_close(vma); err_obj: i915_gem_object_put(obj); return err; @@ -778,7 +769,7 @@ static int drunk_hole(struct i915_address_space *vm, addr, BIT_ULL(size), hole_start, hole_end, err); - goto err; + goto err_obj; } if (!drm_mm_node_allocated(&vma->node) || @@ -788,7 +779,7 @@ static int drunk_hole(struct i915_address_space *vm, i915_vma_unpin(vma); err = i915_vma_unbind(vma); err = -EINVAL; - goto err; + goto err_obj; } i915_vma_unpin(vma); @@ -799,13 +790,10 @@ static int drunk_hole(struct i915_address_space *vm, "%s timed out after %d/%d\n", __func__, n, count)) { err = -EINTR; - goto err; + goto err_obj; } } -err: - if (!i915_vma_is_ggtt(vma)) - i915_vma_close(vma); err_obj: i915_gem_object_put(obj); kfree(order); From c5a01ec7579a264ad332be8fa89f446efa2aaa58 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 22 Apr 2020 19:19:12 +0300 Subject: [PATCH 080/121] drm/i915: Fix skl+ non-scaled pfit modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix skl_update_scaler_crtc() to deal with different scaling modes correctly. The current implementation assumes DRM_MODE_SCALE_FULLSCREEN. Fortunately we don't expose any border properties currently, so the code does actually end up doing the right thing (assigning a scaler for pfit). The code does need to be fixed before any borders are exposed. Also we have redundant calls to skl_update_scaler_crtc() in the dp/hdmi .compute_config() hooks which can be nuked. They were called before we had even computed the pfit state anyway, so they were basically nonsense. The real call we need to keep is in intel_crtc_atomic_check(). v2: Deal with skl_update_scaler_crtc() in intel_dp_ycbcr420_config() Reviewed-by: Manasi Navare Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200422161917.17389-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 40 ++++++++++---------- drivers/gpu/drm/i915/display/intel_display.h | 1 - drivers/gpu/drm/i915/display/intel_dp.c | 17 --------- drivers/gpu/drm/i915/display/intel_hdmi.c | 7 ---- 4 files changed, 19 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index adb08a00bb57..375f3ff1642f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6089,30 +6089,28 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, return 0; } -/** - * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 
- * - * @state: crtc's scaler state - * - * Return - * 0 - scaler_usage updated successfully - * error - requested scaling cannot be supported or other error condition - */ -int skl_update_scaler_crtc(struct intel_crtc_state *state) +static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) { - const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode; - bool need_scaler = false; + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int width, height; - if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || - state->pch_pfit.enabled) - need_scaler = true; + if (crtc_state->pch_pfit.enabled) { + u32 pfit_size = crtc_state->pch_pfit.size; - return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX, - &state->scaler_state.scaler_id, - state->pipe_src_w, state->pipe_src_h, - adjusted_mode->crtc_hdisplay, - adjusted_mode->crtc_vdisplay, NULL, 0, - need_scaler); + width = pfit_size >> 16; + height = pfit_size & 0xffff; + } else { + width = adjusted_mode->crtc_hdisplay; + height = adjusted_mode->crtc_vdisplay; + } + + return skl_update_scaler(crtc_state, !crtc_state->hw.active, + SKL_CRTC_INDEX, + &crtc_state->scaler_state.scaler_id, + crtc_state->pipe_src_w, crtc_state->pipe_src_h, + width, height, NULL, 0, + crtc_state->pch_pfit.enabled); } /** diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 8d872ed0de36..efb4da205ea2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -591,7 +591,6 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); -int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 66f8a9d1503d..9312be686413 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2342,12 +2342,10 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, struct drm_connector *connector, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); const struct drm_display_info *info = &connector->display_info; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - int ret; if (!drm_mode_is_420_only(info, adjusted_mode) || !intel_dp_get_colorimetry_status(intel_dp) || @@ -2356,14 +2354,6 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - /* YCBCR 420 output conversion needs a scaler */ - ret = skl_update_scaler_crtc(crtc_state); - if (ret) { - drm_dbg_kms(&i915->drm, - "Scaler allocation for output failed\n"); - return ret; - } - intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN); return 0; @@ -2563,7 +2553,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, else ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base, pipe_config); - if (ret) return ret; @@ -2579,12 +2568,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_fixed_panel_mode(intel_connector->panel.fixed_mode, adjusted_mode); - if (INTEL_GEN(dev_priv) 
>= 9) { - ret = skl_update_scaler_crtc(pipe_config); - if (ret) - return ret; - } - if (HAS_GMCH(dev_priv)) intel_gmch_panel_fitting(intel_crtc, pipe_config, conn_state->scaling_mode); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index cdf9dedca36b..000ac0fc4edc 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2336,13 +2336,6 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - /* YCBCR 420 output conversion needs a scaler */ - if (skl_update_scaler_crtc(config)) { - drm_dbg_kms(&i915->drm, - "Scaler allocation for output failed\n"); - return false; - } - intel_pch_panel_fitting(intel_crtc, config, DRM_MODE_SCALE_FULLSCREEN); From eac9c58539aa2db9e1173964025760fdea905656 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 22 Apr 2020 19:19:13 +0300 Subject: [PATCH 081/121] drm/i915: Flatten a bunch of the pfit functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Most of the pfit functions are of the form: func() { if (pfit_enabled) { ... } } Flip the pfit_enabled check around to flatten the functions. And while we're touching all this let's do the usual s/pipe_config/crtc_state/ replacement. Reviewed-by: Manasi Navare Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200422161917.17389-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 235 +++++++++---------- 1 file changed, 116 insertions(+), 119 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 375f3ff1642f..96d0768ecf5d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6222,43 +6222,43 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; + u16 uv_rgb_hphase, uv_rgb_vphase; + int pfit_w, pfit_h, hscale, vscale; + unsigned long irqflags; + int id; - if (crtc_state->pch_pfit.enabled) { - u16 uv_rgb_hphase, uv_rgb_vphase; - int pfit_w, pfit_h, hscale, vscale; - unsigned long irqflags; - int id; + if (!crtc_state->pch_pfit.enabled) + return; - if (drm_WARN_ON(&dev_priv->drm, - crtc_state->scaler_state.scaler_id < 0)) - return; + if (drm_WARN_ON(&dev_priv->drm, + crtc_state->scaler_state.scaler_id < 0)) + return; - pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; - pfit_h = crtc_state->pch_pfit.size & 0xFFFF; + pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; + pfit_h = crtc_state->pch_pfit.size & 0xFFFF; - hscale = (crtc_state->pipe_src_w << 16) / pfit_w; - vscale = (crtc_state->pipe_src_h << 16) / pfit_h; + hscale = (crtc_state->pipe_src_w << 16) / pfit_w; + vscale = (crtc_state->pipe_src_h << 16) / pfit_h; - uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); - uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); + uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); + uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); - id = scaler_state->scaler_id; + id = scaler_state->scaler_id; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN | - PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); - intel_de_write_fw(dev_priv, 
SKL_PS_VPHASE(pipe, id), - PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); - intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), - PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); - intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), - crtc_state->pch_pfit.pos); - intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), - crtc_state->pch_pfit.size); + intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN | + PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); + intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), + PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), + PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), + crtc_state->pch_pfit.pos); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), + crtc_state->pch_pfit.size); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - } + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -6267,22 +6267,23 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (crtc_state->pch_pfit.enabled) { - /* Force use of hard-coded filter coefficients - * as some pre-programmed values are broken, - * e.g. x201. - */ - if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) - intel_de_write(dev_priv, PF_CTL(pipe), - PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); - else - intel_de_write(dev_priv, PF_CTL(pipe), - PF_ENABLE | PF_FILTER_MED_3x3); - intel_de_write(dev_priv, PF_WIN_POS(pipe), - crtc_state->pch_pfit.pos); - intel_de_write(dev_priv, PF_WIN_SZ(pipe), - crtc_state->pch_pfit.size); - } + if (!crtc_state->pch_pfit.enabled) + return; + + /* Force use of hard-coded filter coefficients + * as some pre-programmed values are broken, + * e.g. x201. + */ + if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) + intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE | + PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); + else + intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE | + PF_FILTER_MED_3x3); + intel_de_write(dev_priv, PF_WIN_POS(pipe), + crtc_state->pch_pfit.pos); + intel_de_write(dev_priv, PF_WIN_SZ(pipe), + crtc_state->pch_pfit.size); } void hsw_enable_ips(const struct intel_crtc_state *crtc_state) @@ -7099,11 +7100,12 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) /* To avoid upsetting the power well on haswell only disable the pfit if * it's in use. The hw state code will make sure we get this right. 
*/ - if (old_crtc_state->pch_pfit.enabled) { - intel_de_write(dev_priv, PF_CTL(pipe), 0); - intel_de_write(dev_priv, PF_WIN_POS(pipe), 0); - intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0); - } + if (!old_crtc_state->pch_pfit.enabled) + return; + + intel_de_write(dev_priv, PF_CTL(pipe), 0); + intel_de_write(dev_priv, PF_WIN_POS(pipe), 0); + intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0); } static void ilk_crtc_disable(struct intel_atomic_state *state, @@ -7931,40 +7933,36 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); } -static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) +static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) { - u32 pixel_rate; - - pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock; + u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock; + u32 pfit_size = crtc_state->pch_pfit.size; + u64 pipe_w, pipe_h, pfit_w, pfit_h; /* * We only use IF-ID interlacing. If we ever use * PF-ID we'll need to adjust the pixel_rate here. */ - if (pipe_config->pch_pfit.enabled) { - u64 pipe_w, pipe_h, pfit_w, pfit_h; - u32 pfit_size = pipe_config->pch_pfit.size; + if (!crtc_state->pch_pfit.enabled) + return pixel_rate; - pipe_w = pipe_config->pipe_src_w; - pipe_h = pipe_config->pipe_src_h; + pipe_w = crtc_state->pipe_src_w; + pipe_h = crtc_state->pipe_src_h; - pfit_w = (pfit_size >> 16) & 0xFFFF; - pfit_h = pfit_size & 0xFFFF; - if (pipe_w < pfit_w) - pipe_w = pfit_w; - if (pipe_h < pfit_h) - pipe_h = pfit_h; + pfit_w = (pfit_size >> 16) & 0xFFFF; + pfit_h = pfit_size & 0xFFFF; + if (pipe_w < pfit_w) + pipe_w = pfit_w; + if (pipe_h < pfit_h) + pipe_h = pfit_h; - if (drm_WARN_ON(pipe_config->uapi.crtc->dev, - !pfit_w || !pfit_h)) - return pixel_rate; + if (drm_WARN_ON(crtc_state->uapi.crtc->dev, + !pfit_w || !pfit_h)) + return pixel_rate; - pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), - pfit_w * pfit_h); - } - - return pixel_rate; + return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), + pfit_w * pfit_h); } static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) @@ -9158,9 +9156,9 @@ static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); } -static void i9xx_get_pfit_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; @@ -9180,9 +9178,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, return; } - pipe_config->gmch_pfit.control = tmp; - pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv, - PFIT_PGM_RATIOS); + crtc_state->gmch_pfit.control = tmp; + crtc_state->gmch_pfit.pgm_ratios = + intel_de_read(dev_priv, PFIT_PGM_RATIOS); } static void vlv_crtc_clock_get(struct intel_crtc *crtc, @@ -9432,7 +9430,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); - i9xx_get_pfit_config(crtc, pipe_config); + i9xx_get_pfit_config(pipe_config); if (INTEL_GEN(dev_priv) >= 4) { /* No way to read it out on pipes B and C */ @@ -10402,37 +10400,37 @@ static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, &pipe_config->fdi_m_n, NULL); } -static void skl_get_pfit_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void 
skl_get_pfit_config(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; - u32 ps_ctrl = 0; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; int id = -1; int i; /* find scaler attached to this pipe */ for (i = 0; i < crtc->num_scalers; i++) { - ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); - if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { - id = i; - pipe_config->pch_pfit.enabled = true; - pipe_config->pch_pfit.pos = intel_de_read(dev_priv, - SKL_PS_WIN_POS(crtc->pipe, i)); - pipe_config->pch_pfit.size = intel_de_read(dev_priv, - SKL_PS_WIN_SZ(crtc->pipe, i)); - scaler_state->scalers[i].in_use = true; - break; - } + u32 tmp; + + tmp = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); + if ((tmp & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN) + continue; + + id = i; + crtc_state->pch_pfit.enabled = true; + crtc_state->pch_pfit.pos = + intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); + crtc_state->pch_pfit.size = + intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); + scaler_state->scalers[i].in_use = true; + break; } scaler_state->scaler_id = id; - if (id >= 0) { + if (id >= 0) scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); - } else { + else scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); - } } static void @@ -10568,30 +10566,29 @@ error: kfree(intel_fb); } -static void ilk_get_pfit_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); + if ((tmp & PF_ENABLE) == 0) + return; - if (tmp & PF_ENABLE) { - pipe_config->pch_pfit.enabled = true; - pipe_config->pch_pfit.pos = intel_de_read(dev_priv, - PF_WIN_POS(crtc->pipe)); - pipe_config->pch_pfit.size = intel_de_read(dev_priv, - PF_WIN_SZ(crtc->pipe)); + crtc_state->pch_pfit.enabled = true; + crtc_state->pch_pfit.pos = + intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); + crtc_state->pch_pfit.size = + intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); - /* We currently do not free assignements of panel fitters on - * ivb/hsw (since we don't use the higher upscaling modes which - * differentiates them) so just WARN about this case for now. */ - if (IS_GEN(dev_priv, 7)) { - drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) != - PF_PIPE_SEL_IVB(crtc->pipe)); - } - } + /* + * We currently do not free assignements of panel fitters on + * ivb/hsw (since we don't use the higher upscaling modes which + * differentiates them) so just WARN about this case for now. 
+ */ drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) && + (tmp & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); } static bool ilk_get_pipe_config(struct intel_crtc *crtc, @@ -10702,7 +10699,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); - ilk_get_pfit_config(crtc, pipe_config); + ilk_get_pfit_config(pipe_config); ret = true; @@ -11176,9 +11173,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, power_domain_mask |= BIT_ULL(power_domain); if (INTEL_GEN(dev_priv) >= 9) - skl_get_pfit_config(crtc, pipe_config); + skl_get_pfit_config(pipe_config); else - ilk_get_pfit_config(crtc, pipe_config); + ilk_get_pfit_config(pipe_config); } if (hsw_crtc_supports_ips(crtc)) { From 35dd95b4ee19787d4dd321ea88dd72c9b87d9425 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 22 Apr 2020 19:19:14 +0300 Subject: [PATCH 082/121] drm/i915: Use drm_rect to store the pfit window pos/size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make things a bit more abstract by replacing the pch_pfit.pos/size raw register values with a drm_rect. Makes it slightly more convenient to e.g. compute the scaling factors. v2: Use drm_rect_init() Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200422161917.17389-3-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/display/intel_display.c | 101 +++++++++++------- .../drm/i915/display/intel_display_types.h | 3 +- drivers/gpu/drm/i915/display/intel_panel.c | 13 ++- 3 files changed, 67 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 96d0768ecf5d..6bb87965801e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6096,10 +6096,8 @@ static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) int width, height; if (crtc_state->pch_pfit.enabled) { - u32 pfit_size = crtc_state->pch_pfit.size; - - width = pfit_size >> 16; - height = pfit_size & 0xffff; + width = drm_rect_width(&crtc_state->pch_pfit.dst); + height = drm_rect_height(&crtc_state->pch_pfit.dst); } else { width = adjusted_mode->crtc_hdisplay; height = adjusted_mode->crtc_vdisplay; @@ -6219,11 +6217,20 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; + struct drm_rect src = { + .x2 = crtc_state->pipe_src_w << 16, + .y2 = crtc_state->pipe_src_h << 16, + }; + const struct drm_rect *dst = &crtc_state->pch_pfit.dst; u16 uv_rgb_hphase, uv_rgb_vphase; - int pfit_w, pfit_h, hscale, vscale; + enum pipe pipe = crtc->pipe; + int width = drm_rect_width(dst); + int height = drm_rect_height(dst); + int x = dst->x1; + int y = dst->y1; + int hscale, vscale; unsigned long irqflags; int id; @@ -6234,11 +6241,8 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) crtc_state->scaler_state.scaler_id < 0)) return; - pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; - pfit_h = crtc_state->pch_pfit.size & 0xFFFF; - - hscale = (crtc_state->pipe_src_w << 16) / pfit_w; - vscale = (crtc_state->pipe_src_h << 16) / pfit_h; + hscale = drm_rect_calc_hscale(&src, 
dst, 0, INT_MAX); + vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX); uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); @@ -6254,9 +6258,9 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), - crtc_state->pch_pfit.pos); + x << 16 | y); intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), - crtc_state->pch_pfit.size); + width << 16 | height); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -6265,7 +6269,12 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_rect *dst = &crtc_state->pch_pfit.dst; enum pipe pipe = crtc->pipe; + int width = drm_rect_width(dst); + int height = drm_rect_height(dst); + int x = dst->x1; + int y = dst->y1; if (!crtc_state->pch_pfit.enabled) return; @@ -6280,10 +6289,8 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) else intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); - intel_de_write(dev_priv, PF_WIN_POS(pipe), - crtc_state->pch_pfit.pos); - intel_de_write(dev_priv, PF_WIN_SZ(pipe), - crtc_state->pch_pfit.size); + intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y); + intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height); } void hsw_enable_ips(const struct intel_crtc_state *crtc_state) @@ -7936,8 +7943,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) { u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock; - u32 pfit_size = crtc_state->pch_pfit.size; - u64 pipe_w, pipe_h, pfit_w, pfit_h; + unsigned int pipe_w, pipe_h, pfit_w, pfit_h; /* * We only use IF-ID interlacing. 
If we ever use @@ -7950,8 +7956,9 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) pipe_w = crtc_state->pipe_src_w; pipe_h = crtc_state->pipe_src_h; - pfit_w = (pfit_size >> 16) & 0xFFFF; - pfit_h = pfit_size & 0xFFFF; + pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst); + pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst); + if (pipe_w < pfit_w) pipe_w = pfit_w; if (pipe_h < pfit_h) @@ -10400,6 +10407,14 @@ static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, &pipe_config->fdi_m_n, NULL); } +static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, + u32 pos, u32 size) +{ + drm_rect_init(&crtc_state->pch_pfit.dst, + pos >> 16, pos & 0xffff, + size >> 16, size & 0xffff); +} + static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -10410,18 +10425,20 @@ static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) /* find scaler attached to this pipe */ for (i = 0; i < crtc->num_scalers; i++) { - u32 tmp; + u32 ctl, pos, size; - tmp = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); - if ((tmp & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN) + ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); + if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN) continue; id = i; crtc_state->pch_pfit.enabled = true; - crtc_state->pch_pfit.pos = - intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); - crtc_state->pch_pfit.size = - intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); + + pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); + size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); + + ilk_get_pfit_pos_size(crtc_state, pos, size); + scaler_state->scalers[i].in_use = true; break; } @@ -10570,17 +10587,18 @@ static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 tmp; + u32 ctl, pos, size; - tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); - if ((tmp & PF_ENABLE) == 0) + ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); + if ((ctl & PF_ENABLE) == 0) return; crtc_state->pch_pfit.enabled = true; - crtc_state->pch_pfit.pos = - intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); - crtc_state->pch_pfit.size = - intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); + + pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); + size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); + + ilk_get_pfit_pos_size(crtc_state, pos, size); /* * We currently do not free assignements of panel fitters on @@ -10588,7 +10606,7 @@ static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) * differentiates them) so just WARN about this case for now. 
*/ drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) && - (tmp & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); + (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); } static bool ilk_get_pipe_config(struct intel_crtc *crtc, @@ -13036,9 +13054,8 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, pipe_config->gmch_pfit.lvds_border_bits); else drm_dbg_kms(&dev_priv->drm, - "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", - pipe_config->pch_pfit.pos, - pipe_config->pch_pfit.size, + "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n", + DRM_RECT_ARG(&pipe_config->pch_pfit.dst), enableddisabled(pipe_config->pch_pfit.enabled), yesno(pipe_config->pch_pfit.force_thru)); @@ -13780,8 +13797,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); if (current_config->pch_pfit.enabled) { - PIPE_CONF_CHECK_X(pch_pfit.pos); - PIPE_CONF_CHECK_X(pch_pfit.size); + PIPE_CONF_CHECK_I(pch_pfit.dst.x1); + PIPE_CONF_CHECK_I(pch_pfit.dst.y1); + PIPE_CONF_CHECK_I(pch_pfit.dst.x2); + PIPE_CONF_CHECK_I(pch_pfit.dst.y2); } PIPE_CONF_CHECK_I(scaler_state.scaler_id); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 8752f4d6ea9b..b37129ca0518 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -974,8 +974,7 @@ struct intel_crtc_state { /* Panel fitter placement and size for Ironlake+ */ struct { - u32 pos; - u32 size; + struct drm_rect dst; bool enabled; bool force_thru; } pch_pfit; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index bcd2cc1aba90..3e370888ecef 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -182,13 +182,13 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc, int fitting_mode) { const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - int x = 0, y = 0, width = 0, height = 0; + int x, y, width, height; /* Native modes don't need fitting */ if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h && pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) - goto done; + return; switch (fitting_mode) { case DRM_MODE_SCALE_CENTER: @@ -234,14 +234,13 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc, break; default: - WARN(1, "bad panel fit mode: %d\n", fitting_mode); + MISSING_CASE(fitting_mode); return; } -done: - pipe_config->pch_pfit.pos = (x << 16) | y; - pipe_config->pch_pfit.size = (width << 16) | height; - pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0; + drm_rect_init(&pipe_config->pch_pfit.dst, + x, y, width, height); + pipe_config->pch_pfit.enabled = true; } static void From f650af72e5ba499755eaa729eb9f1bc802ecf2f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 22 Apr 2020 19:19:15 +0300 Subject: [PATCH 083/121] drm/i915: s/pipe_config/crtc_state/ in pfit functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow the new naming convention and call the crtc state "crtc_state", and while at it drop the redundant crtc argument. 
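A minimal sketch of the resulting shape (mirroring the intel_panel.c/h hunks below, not the literal diff):

	/* before: the crtc was passed alongside its own state */
	void intel_gmch_panel_fitting(struct intel_crtc *crtc,
				      struct intel_crtc_state *pipe_config,
				      int fitting_mode);

	/* after: the crtc is derived from the state where it is needed */
	void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
				      int fitting_mode)
	{
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
		/* ... fitting logic unchanged ... */
	}
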
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200422161917.17389-4-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/display/icl_dsi.c | 3 +- drivers/gpu/drm/i915/display/intel_dp.c | 8 +- drivers/gpu/drm/i915/display/intel_hdmi.c | 4 +- drivers/gpu/drm/i915/display/intel_lvds.c | 4 +- drivers/gpu/drm/i915/display/intel_panel.c | 93 +++++++++++----------- drivers/gpu/drm/i915/display/intel_panel.h | 6 +- drivers/gpu/drm/i915/display/vlv_dsi.c | 5 +- 7 files changed, 58 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 73ebd387f549..fb9291de55d0 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1525,7 +1525,6 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; struct drm_display_mode *adjusted_mode = @@ -1533,7 +1532,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; intel_fixed_panel_mode(fixed_mode, adjusted_mode); - intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode); + intel_pch_panel_fitting(pipe_config, conn_state->scaling_mode); adjusted_mode->flags = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 9312be686413..b9a760012b3f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2345,7 +2345,6 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, const struct drm_display_info *info = &connector->display_info; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (!drm_mode_is_420_only(info, adjusted_mode) || !intel_dp_get_colorimetry_status(intel_dp) || @@ -2354,7 +2353,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN); + intel_pch_panel_fitting(crtc_state, DRM_MODE_SCALE_FULLSCREEN); return 0; } @@ -2535,7 +2534,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); enum port port = encoder->port; - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_connector *intel_connector = intel_dp->attached_connector; struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); @@ -2569,10 +2567,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode); if (HAS_GMCH(dev_priv)) - intel_gmch_panel_fitting(intel_crtc, pipe_config, + intel_gmch_panel_fitting(pipe_config, conn_state->scaling_mode); else - intel_pch_panel_fitting(intel_crtc, pipe_config, + intel_pch_panel_fitting(pipe_config, conn_state->scaling_mode); } diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 000ac0fc4edc..275a676c7b08 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2326,7 +2326,6 
@@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, struct intel_crtc_state *config) { struct drm_i915_private *i915 = to_i915(connector->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc); if (!connector->ycbcr_420_allowed) { drm_err(&i915->drm, @@ -2336,8 +2335,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - intel_pch_panel_fitting(intel_crtc, config, - DRM_MODE_SCALE_FULLSCREEN); + intel_pch_panel_fitting(config, DRM_MODE_SCALE_FULLSCREEN); return true; } diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index fe591f82163e..276db41b95ef 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -439,10 +439,10 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, if (HAS_PCH_SPLIT(dev_priv)) { pipe_config->has_pch_encoder = true; - intel_pch_panel_fitting(intel_crtc, pipe_config, + intel_pch_panel_fitting(pipe_config, conn_state->scaling_mode); } else { - intel_gmch_panel_fitting(intel_crtc, pipe_config, + intel_gmch_panel_fitting(pipe_config, conn_state->scaling_mode); } diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 3e370888ecef..3ea1704277a8 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -177,23 +177,23 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector) /* adjusted_mode has been preset to be the panel's fixed mode */ void -intel_pch_panel_fitting(struct intel_crtc *intel_crtc, - struct intel_crtc_state *pipe_config, +intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, int fitting_mode) { - const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; int x, y, width, height; /* Native modes don't need fitting */ - if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && - adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h && - pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) + if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w && + adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) return; switch (fitting_mode) { case DRM_MODE_SCALE_CENTER: - width = pipe_config->pipe_src_w; - height = pipe_config->pipe_src_h; + width = crtc_state->pipe_src_w; + height = crtc_state->pipe_src_h; x = (adjusted_mode->crtc_hdisplay - width + 1)/2; y = (adjusted_mode->crtc_vdisplay - height + 1)/2; break; @@ -202,18 +202,18 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc, /* Scale but preserve the aspect ratio */ { u32 scaled_width = adjusted_mode->crtc_hdisplay - * pipe_config->pipe_src_h; - u32 scaled_height = pipe_config->pipe_src_w + * crtc_state->pipe_src_h; + u32 scaled_height = crtc_state->pipe_src_w * adjusted_mode->crtc_vdisplay; if (scaled_width > scaled_height) { /* pillar */ - width = scaled_height / pipe_config->pipe_src_h; + width = scaled_height / crtc_state->pipe_src_h; if (width & 1) width++; x = (adjusted_mode->crtc_hdisplay - width + 1) / 2; y = 0; height = adjusted_mode->crtc_vdisplay; } else if (scaled_width < scaled_height) { /* letter */ - height = scaled_width / pipe_config->pipe_src_w; + height = scaled_width / crtc_state->pipe_src_w; if (height & 1) height++; y = (adjusted_mode->crtc_vdisplay - height + 1) / 2; @@ 
-238,9 +238,9 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc, return; } - drm_rect_init(&pipe_config->pch_pfit.dst, + drm_rect_init(&crtc_state->pch_pfit.dst, x, y, width, height); - pipe_config->pch_pfit.enabled = true; + crtc_state->pch_pfit.enabled = true; } static void @@ -299,13 +299,14 @@ static u32 panel_fitter_scaling(u32 source, u32 target) return (FACTOR * ratio + FACTOR/2) / FACTOR; } -static void i965_scale_aspect(struct intel_crtc_state *pipe_config, +static void i965_scale_aspect(struct intel_crtc_state *crtc_state, u32 *pfit_control) { - const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; u32 scaled_width = adjusted_mode->crtc_hdisplay * - pipe_config->pipe_src_h; - u32 scaled_height = pipe_config->pipe_src_w * + crtc_state->pipe_src_h; + u32 scaled_height = crtc_state->pipe_src_w * adjusted_mode->crtc_vdisplay; /* 965+ is easy, it does everything in hw */ @@ -315,18 +316,18 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config, else if (scaled_width < scaled_height) *pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; - else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w) + else if (adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w) *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; } -static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config, +static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, u32 *pfit_control, u32 *pfit_pgm_ratios, u32 *border) { - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 scaled_width = adjusted_mode->crtc_hdisplay * - pipe_config->pipe_src_h; - u32 scaled_height = pipe_config->pipe_src_w * + crtc_state->pipe_src_h; + u32 scaled_height = crtc_state->pipe_src_w * adjusted_mode->crtc_vdisplay; u32 bits; @@ -338,11 +339,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config, if (scaled_width > scaled_height) { /* pillar */ centre_horizontally(adjusted_mode, scaled_height / - pipe_config->pipe_src_h); + crtc_state->pipe_src_h); *border = LVDS_BORDER_ENABLE; - if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) { - bits = panel_fitter_scaling(pipe_config->pipe_src_h, + if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay) { + bits = panel_fitter_scaling(crtc_state->pipe_src_h, adjusted_mode->crtc_vdisplay); *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | @@ -354,11 +355,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config, } else if (scaled_width < scaled_height) { /* letter */ centre_vertically(adjusted_mode, scaled_width / - pipe_config->pipe_src_w); + crtc_state->pipe_src_w); *border = LVDS_BORDER_ENABLE; - if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) { - bits = panel_fitter_scaling(pipe_config->pipe_src_w, + if (crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) { + bits = panel_fitter_scaling(crtc_state->pipe_src_w, adjusted_mode->crtc_hdisplay); *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | @@ -376,17 +377,17 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config, } } -void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, - struct intel_crtc_state *pipe_config, +void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, int fitting_mode) { - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + struct intel_crtc *crtc = 
to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; /* Native modes don't need fitting */ - if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && - adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h) + if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w && + adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h) goto out; switch (fitting_mode) { @@ -395,16 +396,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, * For centered modes, we have to calculate border widths & * heights and modify the values programmed into the CRTC. */ - centre_horizontally(adjusted_mode, pipe_config->pipe_src_w); - centre_vertically(adjusted_mode, pipe_config->pipe_src_h); + centre_horizontally(adjusted_mode, crtc_state->pipe_src_w); + centre_vertically(adjusted_mode, crtc_state->pipe_src_h); border = LVDS_BORDER_ENABLE; break; case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ if (INTEL_GEN(dev_priv) >= 4) - i965_scale_aspect(pipe_config, &pfit_control); + i965_scale_aspect(crtc_state, &pfit_control); else - i9xx_scale_aspect(pipe_config, &pfit_control, + i9xx_scale_aspect(crtc_state, &pfit_control, &pfit_pgm_ratios, &border); break; case DRM_MODE_SCALE_FULLSCREEN: @@ -412,8 +413,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, * Full scaling, even if it changes the aspect ratio. * Fortunately this is all done for us in hw. */ - if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay || - pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) { + if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay || + crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) { pfit_control |= PFIT_ENABLE; if (INTEL_GEN(dev_priv) >= 4) pfit_control |= PFIT_SCALING_AUTO; @@ -433,7 +434,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, /* 965+ wants fuzzy fitting */ /* FIXME: handle multiple panels by failing gracefully */ if (INTEL_GEN(dev_priv) >= 4) - pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY; + pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY; out: if ((pfit_control & PFIT_ENABLE) == 0) { @@ -442,12 +443,12 @@ out: } /* Make sure pre-965 set dither correctly for 18bpp panels. 
*/ - if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18) + if (INTEL_GEN(dev_priv) < 4 && crtc_state->pipe_bpp == 18) pfit_control |= PANEL_8TO6_DITHER_ENABLE; - pipe_config->gmch_pfit.control = pfit_control; - pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; - pipe_config->gmch_pfit.lvds_border_bits = border; + crtc_state->gmch_pfit.control = pfit_control; + crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; + crtc_state->gmch_pfit.lvds_border_bits = border; } /** diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 11f2f6b628d8..92dcb773763c 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -25,11 +25,9 @@ int intel_panel_init(struct intel_panel *panel, void intel_panel_fini(struct intel_panel *panel); void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); -void intel_pch_panel_fitting(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config, +void intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, int fitting_mode); -void intel_gmch_panel_fitting(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config, +void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, int fitting_mode); void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state, u32 level, u32 max); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 46e2895d916d..fe7c9d3ec418 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -267,7 +267,6 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; @@ -279,10 +278,10 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, intel_fixed_panel_mode(fixed_mode, adjusted_mode); if (HAS_GMCH(dev_priv)) - intel_gmch_panel_fitting(crtc, pipe_config, + intel_gmch_panel_fitting(pipe_config, conn_state->scaling_mode); else - intel_pch_panel_fitting(crtc, pipe_config, + intel_pch_panel_fitting(pipe_config, conn_state->scaling_mode); } From 4cecc7c0cc2b5d6e5d8350f77d210d189a25dfb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 22 Apr 2020 19:19:16 +0300 Subject: [PATCH 084/121] drm/i915: Pass connector state to pfit calculations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the entire connector state to intel_{gmch,pch}_panel_fitting(). For now we just need to get at .scaling_mode but in the future we'll want access to the margin properties as well. 
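As a rough sketch of the resulting convention (condensed from the intel_panel.c hunk below; the per-mode bodies are elided):

	void intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
	{
		/* the fitting mode now comes straight from the connector state */
		switch (conn_state->scaling_mode) {
		case DRM_MODE_SCALE_CENTER:
			/* ... */
			break;
		default:
			MISSING_CASE(conn_state->scaling_mode);
			return;
		}
	}

	/* callers simply forward their connector state */
	intel_pch_panel_fitting(pipe_config, conn_state);
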
v2: Deal with intel_dp_ycbcr420_config() Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200422161917.17389-5-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/display/icl_dsi.c | 2 +- drivers/gpu/drm/i915/display/intel_dp.c | 17 ++++++++--------- drivers/gpu/drm/i915/display/intel_hdmi.c | 11 ++++++----- drivers/gpu/drm/i915/display/intel_lvds.c | 7 ++----- drivers/gpu/drm/i915/display/intel_panel.c | 17 ++++++++++------- drivers/gpu/drm/i915/display/intel_panel.h | 4 ++-- drivers/gpu/drm/i915/display/vlv_dsi.c | 6 ++---- 7 files changed, 31 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index fb9291de55d0..6650590f18fd 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1532,7 +1532,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; intel_fixed_panel_mode(fixed_mode, adjusted_mode); - intel_pch_panel_fitting(pipe_config, conn_state->scaling_mode); + intel_pch_panel_fitting(pipe_config, conn_state); adjusted_mode->flags = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index b9a760012b3f..7c4c9a4f4238 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2339,9 +2339,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, static int intel_dp_ycbcr420_config(struct intel_dp *intel_dp, - struct drm_connector *connector, - struct intel_crtc_state *crtc_state) + struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { + struct drm_connector *connector = conn_state->connector; const struct drm_display_info *info = &connector->display_info; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -2353,7 +2354,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - intel_pch_panel_fitting(crtc_state, DRM_MODE_SCALE_FULLSCREEN); + intel_pch_panel_fitting(crtc_state, conn_state); return 0; } @@ -2549,8 +2550,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (lspcon->active) lspcon_ycbcr420_config(&intel_connector->base, pipe_config); else - ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base, - pipe_config); + ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, + conn_state); if (ret) return ret; @@ -2567,11 +2568,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode); if (HAS_GMCH(dev_priv)) - intel_gmch_panel_fitting(pipe_config, - conn_state->scaling_mode); + intel_gmch_panel_fitting(pipe_config, conn_state); else - intel_pch_panel_fitting(pipe_config, - conn_state->scaling_mode); + intel_pch_panel_fitting(pipe_config, conn_state); } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 275a676c7b08..0edbdd39f462 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2322,9 +2322,10 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, } static bool -intel_hdmi_ycbcr420_config(struct drm_connector *connector, - struct intel_crtc_state *config) +intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { + struct drm_connector 
*connector = conn_state->connector; struct drm_i915_private *i915 = to_i915(connector->dev); if (!connector->ycbcr_420_allowed) { @@ -2333,9 +2334,9 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, return false; } - config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; + crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - intel_pch_panel_fitting(config, DRM_MODE_SCALE_FULLSCREEN); + intel_pch_panel_fitting(crtc_state, conn_state); return true; } @@ -2466,7 +2467,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, pipe_config->pixel_multiplier = 2; if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) { - if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) { + if (!intel_hdmi_ycbcr420_config(pipe_config, conn_state)) { drm_err(&dev_priv->drm, "Can't support YCBCR420 output\n"); return -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 276db41b95ef..ae658d9354b7 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -439,12 +439,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, if (HAS_PCH_SPLIT(dev_priv)) { pipe_config->has_pch_encoder = true; - intel_pch_panel_fitting(pipe_config, - conn_state->scaling_mode); + intel_pch_panel_fitting(pipe_config, conn_state); } else { - intel_gmch_panel_fitting(pipe_config, - conn_state->scaling_mode); - + intel_gmch_panel_fitting(pipe_config, conn_state); } /* diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 3ea1704277a8..b4bb1cfc54a9 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -178,7 +178,7 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector) /* adjusted_mode has been preset to be the panel's fixed mode */ void intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, - int fitting_mode) + const struct drm_connector_state *conn_state) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -190,7 +190,7 @@ intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) return; - switch (fitting_mode) { + switch (conn_state->scaling_mode) { case DRM_MODE_SCALE_CENTER: width = crtc_state->pipe_src_w; height = crtc_state->pipe_src_h; @@ -227,6 +227,10 @@ intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, } break; + case DRM_MODE_SCALE_NONE: + WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w); + WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h); + /* fall through */ case DRM_MODE_SCALE_FULLSCREEN: x = y = 0; width = adjusted_mode->crtc_hdisplay; @@ -234,7 +238,7 @@ intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, break; default: - MISSING_CASE(fitting_mode); + MISSING_CASE(conn_state->scaling_mode); return; } @@ -378,7 +382,7 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, } void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, - int fitting_mode) + const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -390,7 +394,7 @@ void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h) goto out; - switch (fitting_mode) { + switch (conn_state->scaling_mode) { case DRM_MODE_SCALE_CENTER: /* * 
For centered modes, we have to calculate border widths & @@ -426,8 +430,7 @@ void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, } break; default: - drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n", - fitting_mode); + MISSING_CASE(conn_state->scaling_mode); return; } diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 92dcb773763c..a26db895038e 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -26,9 +26,9 @@ void intel_panel_fini(struct intel_panel *panel); void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); void intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, - int fitting_mode); + const struct drm_connector_state *conn_state); void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, - int fitting_mode); + const struct drm_connector_state *conn_state); void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state, u32 level, u32 max); int intel_panel_setup_backlight(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index fe7c9d3ec418..9c9ea89d2107 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -278,11 +278,9 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, intel_fixed_panel_mode(fixed_mode, adjusted_mode); if (HAS_GMCH(dev_priv)) - intel_gmch_panel_fitting(pipe_config, - conn_state->scaling_mode); + intel_gmch_panel_fitting(pipe_config, conn_state); else - intel_pch_panel_fitting(pipe_config, - conn_state->scaling_mode); + intel_pch_panel_fitting(pipe_config, conn_state); } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) From d7ff281c6d27d6a5329a426e5782d31f44073538 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 22 Apr 2020 19:19:17 +0300 Subject: [PATCH 085/121] drm/i915: Have pfit calculations return an error code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change intel_{gmch,pch}_panel_fitting() to return a normal error vs. success int. We'll need this later to validate that the margin properties aren't misconfigured. 
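Callers then propagate the result in the usual kernel error style; a minimal sketch of the post-patch calling convention (the real hunks follow below):

	int ret;

	if (HAS_GMCH(dev_priv))
		ret = intel_gmch_panel_fitting(pipe_config, conn_state);
	else
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
	if (ret)
		return ret;	/* e.g. -EINVAL for an unhandled scaling mode */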
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200422161917.17389-6-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/display/icl_dsi.c | 10 +++++++--- drivers/gpu/drm/i915/display/intel_dp.c | 10 +++++----- drivers/gpu/drm/i915/display/intel_hdmi.c | 23 +++++++++++----------- drivers/gpu/drm/i915/display/intel_lvds.c | 13 +++++++----- drivers/gpu/drm/i915/display/intel_panel.c | 19 ++++++++++-------- drivers/gpu/drm/i915/display/intel_panel.h | 6 +++--- drivers/gpu/drm/i915/display/vlv_dsi.c | 6 ++++-- 7 files changed, 49 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 6650590f18fd..4fec5bd64920 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1526,13 +1526,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; const struct drm_display_mode *fixed_mode = - intel_connector->panel.fixed_mode; + intel_connector->panel.fixed_mode; struct drm_display_mode *adjusted_mode = - &pipe_config->hw.adjusted_mode; + &pipe_config->hw.adjusted_mode; + int ret; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; intel_fixed_panel_mode(fixed_mode, adjusted_mode); - intel_pch_panel_fitting(pipe_config, conn_state); + + ret = intel_pch_panel_fitting(pipe_config, conn_state); + if (ret) + return ret; adjusted_mode->flags = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 7c4c9a4f4238..5c7009b74c24 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2354,9 +2354,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - intel_pch_panel_fitting(crtc_state, conn_state); - - return 0; + return intel_pch_panel_fitting(crtc_state, conn_state); } bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, @@ -2568,9 +2566,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode); if (HAS_GMCH(dev_priv)) - intel_gmch_panel_fitting(pipe_config, conn_state); + ret = intel_gmch_panel_fitting(pipe_config, conn_state); else - intel_pch_panel_fitting(pipe_config, conn_state); + ret = intel_pch_panel_fitting(pipe_config, conn_state); + if (ret) + return ret; } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 0edbdd39f462..010f37240710 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2321,24 +2321,27 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return true; } -static bool +static int intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_connector *connector = conn_state->connector; struct drm_i915_private *i915 = to_i915(connector->dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (!drm_mode_is_420_only(&connector->display_info, adjusted_mode)) + return 0; if (!connector->ycbcr_420_allowed) { drm_err(&i915->drm, "Platform doesn't support YCBCR420 output\n"); - return false; + return -EINVAL; } crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - intel_pch_panel_fitting(crtc_state, conn_state); - - return true; + 
return intel_pch_panel_fitting(crtc_state, conn_state); } static int intel_hdmi_port_clock(int clock, int bpc) @@ -2466,13 +2469,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) pipe_config->pixel_multiplier = 2; - if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) { - if (!intel_hdmi_ycbcr420_config(pipe_config, conn_state)) { - drm_err(&dev_priv->drm, - "Can't support YCBCR420 output\n"); - return -EINVAL; - } - } + ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state); + if (ret) + return ret; pipe_config->limited_color_range = intel_hdmi_limited_color_range(pipe_config, conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index ae658d9354b7..872f2a489339 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -403,6 +403,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); unsigned int lvds_bpp; + int ret; /* Should never happen!! */ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) { @@ -436,13 +437,15 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; - if (HAS_PCH_SPLIT(dev_priv)) { + if (HAS_PCH_SPLIT(dev_priv)) pipe_config->has_pch_encoder = true; - intel_pch_panel_fitting(pipe_config, conn_state); - } else { - intel_gmch_panel_fitting(pipe_config, conn_state); - } + if (HAS_GMCH(dev_priv)) + ret = intel_gmch_panel_fitting(pipe_config, conn_state); + else + ret = intel_pch_panel_fitting(pipe_config, conn_state); + if (ret) + return ret; /* * XXX: It would be nice to support lower refresh rates on the diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index b4bb1cfc54a9..aa931f9f0d6a 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -176,9 +176,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector) } /* adjusted_mode has been preset to be the panel's fixed mode */ -void -intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -188,7 +187,7 @@ intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w && adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h && crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) - return; + return 0; switch (conn_state->scaling_mode) { case DRM_MODE_SCALE_CENTER: @@ -239,12 +238,14 @@ intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, default: MISSING_CASE(conn_state->scaling_mode); - return; + return -EINVAL; } drm_rect_init(&crtc_state->pch_pfit.dst, x, y, width, height); crtc_state->pch_pfit.enabled = true; + + return 0; } static void @@ -381,8 +382,8 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, } } -void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { 
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -431,7 +432,7 @@ void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, break; default: MISSING_CASE(conn_state->scaling_mode); - return; + return -EINVAL; } /* 965+ wants fuzzy fitting */ @@ -452,6 +453,8 @@ out: crtc_state->gmch_pfit.control = pfit_control; crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; crtc_state->gmch_pfit.lvds_border_bits = border; + + return 0; } /** diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index a26db895038e..968b95281cb4 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -25,10 +25,10 @@ int intel_panel_init(struct intel_panel *panel, void intel_panel_fini(struct intel_panel *panel); void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); -void intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, +int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -void intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state, u32 level, u32 max); int intel_panel_setup_backlight(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 9c9ea89d2107..f582ab52f0b0 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -278,9 +278,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, intel_fixed_panel_mode(fixed_mode, adjusted_mode); if (HAS_GMCH(dev_priv)) - intel_gmch_panel_fitting(pipe_config, conn_state); + ret = intel_gmch_panel_fitting(pipe_config, conn_state); else - intel_pch_panel_fitting(pipe_config, conn_state); + ret = intel_pch_panel_fitting(pipe_config, conn_state); + if (ret) + return ret; } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) From eee3f91195ad8a096048fb5869e7299fd1e3ea5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 20 Apr 2020 23:06:07 +0300 Subject: [PATCH 086/121] drm/i915: Introduce .set_link_train() vfunc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sort out some of the mess between intel_ddi.c intel_dp.c by introducing a .set_link_train() vfunc. 
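The hook is a per-platform callback on struct intel_dp, selected once at encoder init; roughly (a sketch of the shape added below, not an extra hunk):

	/* intel_display_types.h: new member next to prepare_link_retrain */
	void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat);

	/* intel_dp_init(): pick the non-DDI implementation */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		intel_dig_port->dp.set_link_train = cpt_set_link_train;
	else
		intel_dig_port->dp.set_link_train = g4x_set_link_train;

	/* DDI platforms install intel_ddi_set_link_train instead */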
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200420200610.31798-1-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_ddi.c | 42 +++++ .../drm/i915/display/intel_display_types.h | 1 + drivers/gpu/drm/i915/display/intel_dp.c | 150 ++++++++---------- 3 files changed, 108 insertions(+), 85 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c086ae5eb12f..b8397ea74c80 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3950,6 +3950,46 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) udelay(600); } +static void intel_ddi_set_link_train(struct intel_dp *intel_dp, + u8 dp_train_pat) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); + enum port port = dp_to_dig_port(intel_dp)->base.port; + u32 temp; + + temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + + if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) + temp |= DP_TP_CTL_SCRAMBLE_DISABLE; + else + temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; + + temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; + switch (dp_train_pat & train_pat_mask) { + case DP_TRAINING_PATTERN_DISABLE: + temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; + break; + case DP_TRAINING_PATTERN_1: + temp |= DP_TP_CTL_LINK_TRAIN_PAT1; + break; + case DP_TRAINING_PATTERN_2: + temp |= DP_TP_CTL_LINK_TRAIN_PAT2; + break; + case DP_TRAINING_PATTERN_3: + temp |= DP_TP_CTL_LINK_TRAIN_PAT3; + break; + case DP_TRAINING_PATTERN_4: + temp |= DP_TP_CTL_LINK_TRAIN_PAT4; + break; + } + + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp); + + intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); + intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); +} + static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { @@ -4394,6 +4434,8 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); intel_dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; + intel_dig_port->dp.set_link_train = intel_ddi_set_link_train; + if (INTEL_GEN(dev_priv) < 12) { intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index b37129ca0518..991dbd8ed314 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1367,6 +1367,7 @@ struct intel_dp { /* This is called before a link training is starterd */ void (*prepare_link_retrain)(struct intel_dp *intel_dp); + void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat); /* Displayport compliance testing */ struct intel_dp_compliance compliance; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 5c7009b74c24..5e35f98f0fd0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3618,90 +3618,63 @@ static void chv_post_disable_dp(struct intel_atomic_state *state, } static void -_intel_dp_set_link_train(struct intel_dp *intel_dp, - u32 *DP, - u8 dp_train_pat) +cpt_set_link_train(struct intel_dp *intel_dp, + u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port 
*intel_dig_port = dp_to_dig_port(intel_dp); - enum port port = intel_dig_port->base.port; - u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); + u32 *DP = &intel_dp->DP; - if (dp_train_pat & train_pat_mask) + *DP &= ~DP_LINK_TRAIN_MASK_CPT; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + *DP |= DP_LINK_TRAIN_OFF_CPT; + break; + case DP_TRAINING_PATTERN_1: + *DP |= DP_LINK_TRAIN_PAT_1_CPT; + break; + case DP_TRAINING_PATTERN_2: + *DP |= DP_LINK_TRAIN_PAT_2_CPT; + break; + case DP_TRAINING_PATTERN_3: drm_dbg_kms(&dev_priv->drm, - "Using DP training pattern TPS%d\n", - dp_train_pat & train_pat_mask); - - if (HAS_DDI(dev_priv)) { - u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); - - if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) - temp |= DP_TP_CTL_SCRAMBLE_DISABLE; - else - temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; - - temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - switch (dp_train_pat & train_pat_mask) { - case DP_TRAINING_PATTERN_DISABLE: - temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; - - break; - case DP_TRAINING_PATTERN_1: - temp |= DP_TP_CTL_LINK_TRAIN_PAT1; - break; - case DP_TRAINING_PATTERN_2: - temp |= DP_TP_CTL_LINK_TRAIN_PAT2; - break; - case DP_TRAINING_PATTERN_3: - temp |= DP_TP_CTL_LINK_TRAIN_PAT3; - break; - case DP_TRAINING_PATTERN_4: - temp |= DP_TP_CTL_LINK_TRAIN_PAT4; - break; - } - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp); - - } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || - (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { - *DP &= ~DP_LINK_TRAIN_MASK_CPT; - - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { - case DP_TRAINING_PATTERN_DISABLE: - *DP |= DP_LINK_TRAIN_OFF_CPT; - break; - case DP_TRAINING_PATTERN_1: - *DP |= DP_LINK_TRAIN_PAT_1_CPT; - break; - case DP_TRAINING_PATTERN_2: - *DP |= DP_LINK_TRAIN_PAT_2_CPT; - break; - case DP_TRAINING_PATTERN_3: - drm_dbg_kms(&dev_priv->drm, - "TPS3 not supported, using TPS2 instead\n"); - *DP |= DP_LINK_TRAIN_PAT_2_CPT; - break; - } - - } else { - *DP &= ~DP_LINK_TRAIN_MASK; - - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { - case DP_TRAINING_PATTERN_DISABLE: - *DP |= DP_LINK_TRAIN_OFF; - break; - case DP_TRAINING_PATTERN_1: - *DP |= DP_LINK_TRAIN_PAT_1; - break; - case DP_TRAINING_PATTERN_2: - *DP |= DP_LINK_TRAIN_PAT_2; - break; - case DP_TRAINING_PATTERN_3: - drm_dbg_kms(&dev_priv->drm, - "TPS3 not supported, using TPS2 instead\n"); - *DP |= DP_LINK_TRAIN_PAT_2; - break; - } + "TPS3 not supported, using TPS2 instead\n"); + *DP |= DP_LINK_TRAIN_PAT_2_CPT; + break; } + + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); +} + +static void +g4x_set_link_train(struct intel_dp *intel_dp, + u8 dp_train_pat) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 *DP = &intel_dp->DP; + + *DP &= ~DP_LINK_TRAIN_MASK; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + *DP |= DP_LINK_TRAIN_OFF; + break; + case DP_TRAINING_PATTERN_1: + *DP |= DP_LINK_TRAIN_PAT_1; + break; + case DP_TRAINING_PATTERN_2: + *DP |= DP_LINK_TRAIN_PAT_2; + break; + case DP_TRAINING_PATTERN_3: + drm_dbg_kms(&dev_priv->drm, + "TPS3 not supported, using TPS2 instead\n"); + *DP |= DP_LINK_TRAIN_PAT_2; + break; + } + + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); } static void intel_dp_enable_port(struct intel_dp *intel_dp, @@ -4358,14 +4331,15 @@ void 
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, u8 dp_train_pat) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = - to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); - _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); + if (dp_train_pat & train_pat_mask) + drm_dbg_kms(&dev_priv->drm, + "Using DP training pattern TPS%d\n", + dp_train_pat & train_pat_mask); - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_dp->set_link_train(intel_dp, dp_train_pat); } void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) @@ -8515,6 +8489,12 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->post_disable = g4x_post_disable_dp; } + if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || + (HAS_PCH_CPT(dev_priv) && port != PORT_A)) + intel_dig_port->dp.set_link_train = cpt_set_link_train; + else + intel_dig_port->dp.set_link_train = g4x_set_link_train; + intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); From fb83f72c48a52201431945d31843a964b927a8e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 20 Apr 2020 23:06:08 +0300 Subject: [PATCH 087/121] drm/i915: Introduce .set_signal_levels() vfunc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sort out some of the mess between intel_ddi.c intel_dp.c by introducing a .set_signal_levels() vfunc. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200420200610.31798-2-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_ddi.c | 81 +++++++--- .../drm/i915/display/intel_display_types.h | 1 + drivers/gpu/drm/i915/display/intel_dp.c | 150 +++++++++++------- 3 files changed, 152 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index b8397ea74c80..546089960299 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2661,38 +2661,66 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp) return translate_signal_level(intel_dp, signal_levels); } -u32 bxt_signal_levels(struct intel_dp *intel_dp) +static void +tgl_set_signal_levels(struct intel_dp *intel_dp) { - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); - struct intel_encoder *encoder = &dport->base; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int level = intel_ddi_dp_level(intel_dp); - if (INTEL_GEN(dev_priv) >= 12) - tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate, - level, encoder->type); - else if (INTEL_GEN(dev_priv) >= 11) - icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, - level, encoder->type); - else if (IS_CANNONLAKE(dev_priv)) - cnl_ddi_vswing_sequence(encoder, level, encoder->type); - else - bxt_ddi_vswing_sequence(encoder, level, encoder->type); - - return 0; + tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate, + level, encoder->type); } -u32 ddi_signal_levels(struct intel_dp *intel_dp) +static void +icl_set_signal_levels(struct intel_dp *intel_dp) { - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct 
drm_i915_private *dev_priv = to_i915(dport->base.base.dev); - struct intel_encoder *encoder = &dport->base; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int level = intel_ddi_dp_level(intel_dp); + icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, + level, encoder->type); +} + +static void +cnl_set_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + int level = intel_ddi_dp_level(intel_dp); + + cnl_ddi_vswing_sequence(encoder, level, encoder->type); +} + +static void +bxt_set_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + int level = intel_ddi_dp_level(intel_dp); + + bxt_ddi_vswing_sequence(encoder, level, encoder->type); +} + +static void +hsw_set_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + int level = intel_ddi_dp_level(intel_dp); + enum port port = encoder->port; + u32 signal_levels; + + signal_levels = DDI_BUF_TRANS_SELECT(level); + + drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + signal_levels); + + intel_dp->DP &= ~DDI_BUF_EMP_MASK; + intel_dp->DP |= signal_levels; + if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, encoder->type); - return DDI_BUF_TRANS_SELECT(level); + intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); + intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); } static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, @@ -4436,6 +4464,17 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) intel_ddi_prepare_link_retrain; intel_dig_port->dp.set_link_train = intel_ddi_set_link_train; + if (INTEL_GEN(dev_priv) >= 12) + intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels; + else if (INTEL_GEN(dev_priv) >= 11) + intel_dig_port->dp.set_signal_levels = icl_set_signal_levels; + else if (IS_CANNONLAKE(dev_priv)) + intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels; + else if (IS_GEN9_LP(dev_priv)) + intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels; + else + intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels; + if (INTEL_GEN(dev_priv) < 12) { intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 991dbd8ed314..baafd4a6ca24 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1368,6 +1368,7 @@ struct intel_dp { /* This is called before a link training is starterd */ void (*prepare_link_retrain)(struct intel_dp *intel_dp); void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat); + void (*set_signal_levels)(struct intel_dp *intel_dp); /* Displayport compliance testing */ struct intel_dp_compliance compliance; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 5e35f98f0fd0..6335b181be4b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4013,7 +4013,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) } } -static u32 vlv_signal_levels(struct intel_dp *intel_dp) +static void vlv_set_signal_levels(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; unsigned long 
demph_reg_value, preemph_reg_value, @@ -4041,7 +4041,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp) uniqtranscale_reg_value = 0x5598DA3A; break; default: - return 0; + return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_1: @@ -4060,7 +4060,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp) uniqtranscale_reg_value = 0x55ADDA3A; break; default: - return 0; + return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_2: @@ -4075,7 +4075,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp) uniqtranscale_reg_value = 0x55ADDA3A; break; default: - return 0; + return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_3: @@ -4086,20 +4086,18 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp) uniqtranscale_reg_value = 0x55ADDA3A; break; default: - return 0; + return; } break; default: - return 0; + return; } vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, uniqtranscale_reg_value, 0); - - return 0; } -static u32 chv_signal_levels(struct intel_dp *intel_dp) +static void chv_set_signal_levels(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u32 deemph_reg_value, margin_reg_value; @@ -4127,7 +4125,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp) uniq_trans_scale = true; break; default: - return 0; + return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_1: @@ -4145,7 +4143,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp) margin_reg_value = 154; break; default: - return 0; + return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_2: @@ -4159,7 +4157,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp) margin_reg_value = 154; break; default: - return 0; + return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_3: @@ -4169,21 +4167,18 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp) margin_reg_value = 154; break; default: - return 0; + return; } break; default: - return 0; + return; } chv_set_phy_signal_level(encoder, deemph_reg_value, margin_reg_value, uniq_trans_scale); - - return 0; } -static u32 -g4x_signal_levels(u8 train_set) +static u32 g4x_signal_levels(u8 train_set) { u32 signal_levels = 0; @@ -4220,12 +4215,31 @@ g4x_signal_levels(u8 train_set) return signal_levels; } -/* SNB CPU eDP voltage swing and pre-emphasis control */ -static u32 -snb_cpu_edp_signal_levels(u8 train_set) +static void +g4x_set_signal_levels(struct intel_dp *intel_dp) { - int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 train_set = intel_dp->train_set[0]; + u32 signal_levels; + + signal_levels = g4x_signal_levels(train_set); + + drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + signal_levels); + + intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); + intel_dp->DP |= signal_levels; + + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); +} + +/* SNB CPU eDP voltage swing and pre-emphasis control */ +static u32 snb_cpu_edp_signal_levels(u8 train_set) +{ + u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -4248,12 +4262,31 @@ snb_cpu_edp_signal_levels(u8 train_set) } } -/* IVB CPU eDP voltage swing and pre-emphasis control */ -static u32 -ivb_cpu_edp_signal_levels(u8 train_set) +static void +snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) { - 
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 train_set = intel_dp->train_set[0]; + u32 signal_levels; + + signal_levels = snb_cpu_edp_signal_levels(train_set); + + drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + signal_levels); + + intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; + intel_dp->DP |= signal_levels; + + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); +} + +/* IVB CPU eDP voltage swing and pre-emphasis control */ +static u32 ivb_cpu_edp_signal_levels(u8 train_set) +{ + u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: return EDP_LINK_TRAIN_400MV_0DB_IVB; @@ -4279,38 +4312,29 @@ ivb_cpu_edp_signal_levels(u8 train_set) } } -void -intel_dp_set_signal_levels(struct intel_dp *intel_dp) +static void +ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - enum port port = intel_dig_port->base.port; - u32 signal_levels, mask = 0; u8 train_set = intel_dp->train_set[0]; + u32 signal_levels; - if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - signal_levels = bxt_signal_levels(intel_dp); - } else if (HAS_DDI(dev_priv)) { - signal_levels = ddi_signal_levels(intel_dp); - mask = DDI_BUF_EMP_MASK; - } else if (IS_CHERRYVIEW(dev_priv)) { - signal_levels = chv_signal_levels(intel_dp); - } else if (IS_VALLEYVIEW(dev_priv)) { - signal_levels = vlv_signal_levels(intel_dp); - } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { - signal_levels = ivb_cpu_edp_signal_levels(train_set); - mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; - } else if (IS_GEN(dev_priv, 6) && port == PORT_A) { - signal_levels = snb_cpu_edp_signal_levels(train_set); - mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; - } else { - signal_levels = g4x_signal_levels(train_set); - mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; - } + signal_levels = ivb_cpu_edp_signal_levels(train_set); - if (mask) - drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", - signal_levels); + drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + signal_levels); + + intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; + intel_dp->DP |= signal_levels; + + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); +} + +void intel_dp_set_signal_levels(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 train_set = intel_dp->train_set[0]; drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", train_set & DP_TRAIN_VOLTAGE_SWING_MASK, @@ -4321,10 +4345,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
" (max)" : ""); - intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; - - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_dp->set_signal_levels(intel_dp); } void @@ -8495,6 +8516,17 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, else intel_dig_port->dp.set_link_train = g4x_set_link_train; + if (IS_CHERRYVIEW(dev_priv)) + intel_dig_port->dp.set_signal_levels = chv_set_signal_levels; + else if (IS_VALLEYVIEW(dev_priv)) + intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels; + else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) + intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels; + else if (IS_GEN(dev_priv, 6) && port == PORT_A) + intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; + else + intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels; + intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); From 8fdda3854967d003f743059bb64ace8154266381 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 20 Apr 2020 23:06:09 +0300 Subject: [PATCH 088/121] drm/i915: Introduce .set_idle_link_train() vfunc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Relocate a bunch of DDI specific code from intel_dp.c to intel_ddi.c by introducing a .set_idle_link_train() vfunc. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200420200610.31798-3-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_ddi.c | 29 +++++++++++++++++++ .../drm/i915/display/intel_display_types.h | 1 + drivers/gpu/drm/i915/display/intel_dp.c | 29 ++----------------- 3 files changed, 32 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 546089960299..ef2b1b752497 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4018,6 +4018,34 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); } +static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; + + val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + val &= ~DP_TP_CTL_LINK_TRAIN_MASK; + val |= DP_TP_CTL_LINK_TRAIN_IDLE; + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); + + /* + * Until TGL on PORT_A we can have only eDP in SST mode. There the only + * reason we need to set idle transmission mode is to work around a HW + * issue where we enable the pipe while not in idle link-training mode. + * In this case there is requirement to wait for a minimum number of + * idle patterns to be sent. 
+ */ + if (port == PORT_A && INTEL_GEN(dev_priv) < 12) + return; + + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, + DP_TP_STATUS_IDLE_DONE, 1)) + drm_err(&dev_priv->drm, + "Timed out waiting for DP idle patterns\n"); +} + static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { @@ -4463,6 +4491,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) intel_dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; intel_dig_port->dp.set_link_train = intel_ddi_set_link_train; + intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train; if (INTEL_GEN(dev_priv) >= 12) intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels; diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index baafd4a6ca24..e58e25779bd6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1368,6 +1368,7 @@ struct intel_dp { /* This is called before a link training is starterd */ void (*prepare_link_retrain)(struct intel_dp *intel_dp); void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat); + void (*set_idle_link_train)(struct intel_dp *intel_dp); void (*set_signal_levels)(struct intel_dp *intel_dp); /* Displayport compliance testing */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 6335b181be4b..6952b0295096 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4365,33 +4365,8 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - enum port port = intel_dig_port->base.port; - u32 val; - - if (!HAS_DDI(dev_priv)) - return; - - val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); - val &= ~DP_TP_CTL_LINK_TRAIN_MASK; - val |= DP_TP_CTL_LINK_TRAIN_IDLE; - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); - - /* - * Until TGL on PORT_A we can have only eDP in SST mode. There the only - * reason we need to set idle transmission mode is to work around a HW - * issue where we enable the pipe while not in idle link-training mode. - * In this case there is requirement to wait for a minimum number of - * idle patterns to be sent. - */ - if (port == PORT_A && INTEL_GEN(dev_priv) < 12) - return; - - if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, - DP_TP_STATUS_IDLE_DONE, 1)) - drm_err(&dev_priv->drm, - "Timed out waiting for DP idle patterns\n"); + if (intel_dp->set_idle_link_train) + intel_dp->set_idle_link_train(intel_dp); } static void From 7db8736db0814dacda4d42f108f8ab366961ecdf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 20 Apr 2020 23:06:10 +0300 Subject: [PATCH 089/121] drm/i915: Split some long lines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split some overly long lines. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200420200610.31798-4-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_ddi.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index ef2b1b752497..5601673c3f30 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1261,7 +1261,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) { /* Configure DP_TP_CTL with auto-training */ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE); + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_LINK_TRAIN_PAT1 | + DP_TP_CTL_ENABLE); /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. * DDI E does not support port reversal, the functionality is @@ -1337,7 +1340,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, /* Enable normal pixel sending for FDI */ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE); + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); } static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) From 1ebf7aaf3ac01952c49a303d7ded0dc3a19d3cbb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 01:17:01 +0100 Subject: [PATCH 090/121] drm/i915/gt: Prefer soft-rc6 over RPS DOWN_TIMEOUT The RPS DOWN_TIMEOUT interrupt is signaled after a period of rc6, and upon receipt of that interrupt we reprogram the GPU clocks down to the next idle notch [to help conserve power during rc6]. However, on execlists, we benefit from soft-rc6 immediately parking the GPU and setting idle frequencies upon idling [within a jiffie], and here the interrupt prevents us from restarting from our last frequency. In the process, we can simply opt for a static pm_events mask and rely on the enable/disable interrupts to flush the worker on parking. This will reduce the amount of oscillation observed during steady workloads with microsleeps, as each time the rc6 timeout occurs we immediately follow with a waitboost for a dropped frame.
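Concretely, each platform's *_rps_enable() now states its interrupt mask once, e.g. (a sketch; the full hunks follow):

	/* gen6 and chv: keep the DOWN_TIMEOUT interrupt */
	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* gen8/gen9: soft-rc6 already parks and drops to the idle frequency */
	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;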
Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200422001703.1697-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_rps.c | 41 +++++++++++++---------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 4dcfae16a7ce..785cd58fba76 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -57,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val) if (val < rps->max_freq_softlimit) mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; - mask &= READ_ONCE(rps->pm_events); + mask &= rps->pm_events; return rps_pm_sanitize_mask(rps, ~mask); } @@ -70,19 +70,9 @@ static void rps_reset_ei(struct intel_rps *rps) static void rps_enable_interrupts(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); - u32 events; rps_reset_ei(rps); - if (IS_VALLEYVIEW(gt->i915)) - /* WaGsvRC0ResidencyMethod:vlv */ - events = GEN6_PM_RP_UP_EI_EXPIRED; - else - events = (GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_DOWN_TIMEOUT); - WRITE_ONCE(rps->pm_events, events); - spin_lock_irq(>->irq_lock); gen6_gt_pm_enable_irq(gt, rps->pm_events); spin_unlock_irq(>->irq_lock); @@ -120,8 +110,6 @@ static void rps_disable_interrupts(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); - WRITE_ONCE(rps->pm_events, 0); - intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); @@ -919,12 +907,10 @@ static bool gen9_rps_enable(struct intel_rps *rps) intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, GEN9_FREQUENCY(rps->rp1_freq)); - /* 1 second timeout */ - intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, - GT_INTERVAL_FROM_US(i915, 1000000)); - intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); + rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; + return rps_reset(rps); } @@ -935,12 +921,10 @@ static bool gen8_rps_enable(struct intel_rps *rps) intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(rps->rp1_freq)); - /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ - intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, - 100000000 / 128); /* 1 second timeout */ - intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; + return rps_reset(rps); } @@ -952,6 +936,10 @@ static bool gen6_rps_enable(struct intel_rps *rps) intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + return rps_reset(rps); } @@ -1037,6 +1025,10 @@ static bool chv_rps_enable(struct intel_rps *rps) GEN6_RP_UP_BUSY_AVG | GEN6_RP_DOWN_IDLE_AVG); + rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + /* Setting Fixed Bias */ vlv_punit_get(i915); @@ -1135,6 +1127,9 @@ static bool vlv_rps_enable(struct intel_rps *rps) GEN6_RP_UP_BUSY_AVG | GEN6_RP_DOWN_IDLE_CONT); + /* WaGsvRC0ResidencyMethod:vlv */ + rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; + vlv_punit_get(i915); /* Setting Fixed Bias */ @@ -1469,7 +1464,7 @@ static void rps_work(struct work_struct *work) u32 pm_iir = 0; spin_lock_irq(>->irq_lock); - pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events); + pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; client_boost = 
atomic_read(&rps->num_waiters); spin_unlock_irq(>->irq_lock); @@ -1572,7 +1567,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) struct intel_gt *gt = rps_to_gt(rps); u32 events; - events = pm_iir & READ_ONCE(rps->pm_events); + events = pm_iir & rps->pm_events; if (events) { spin_lock(>->irq_lock); From 555a3224299720ddecfdcda9aaabab608f9f616a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Apr 2020 17:28:04 +0100 Subject: [PATCH 091/121] drm/i915/gt: Trace RPS events Add tracek to the RPS events (interrupts, worker, enabling, threshold selection, frequency setting), so that if we have to debug reticent HW we have some traces to start from. Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200424162805.25920-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_rps.c | 48 ++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 785cd58fba76..ff088702c873 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -71,6 +71,9 @@ static void rps_enable_interrupts(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); + GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n", + rps->pm_events, rps_pm_mask(rps, rps->last_freq)); + rps_reset_ei(rps); spin_lock_irq(>->irq_lock); @@ -128,6 +131,7 @@ static void rps_disable_interrupts(struct intel_rps *rps) cancel_work_sync(&rps->work); rps_reset_interrupts(rps); + GT_TRACE(gt, "interrupts:off\n"); } static const struct cparams { @@ -569,6 +573,10 @@ static void rps_set_power(struct intel_rps *rps, int new_power) if (IS_VALLEYVIEW(i915)) goto skip_hw_write; + GT_TRACE(rps_to_gt(rps), + "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n", + new_power, threshold_up, ei_up, threshold_down, ei_down); + set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up)); set(uncore, GEN6_RP_UP_THRESHOLD, GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100)); @@ -633,6 +641,8 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) { + GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive)); + mutex_lock(&rps->power.mutex); if (interactive) { if (!rps->power.interactive++ && READ_ONCE(rps->active)) @@ -660,6 +670,9 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val) GEN6_AGGRESSIVE_TURBO); set(uncore, GEN6_RPNSWREQ, swreq); + GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n", + val, intel_gpu_freq(rps, val), swreq); + return 0; } @@ -672,6 +685,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val) err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); vlv_punit_put(i915); + GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n", + val, intel_gpu_freq(rps, val)); + return err; } @@ -705,6 +721,8 @@ void intel_rps_unpark(struct intel_rps *rps) if (!rps->enabled) return; + GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq); + /* * Use the user's desired frequency as a guide, but for better * performance, jump directly to RPe as our starting frequency. 
@@ -772,6 +790,8 @@ void intel_rps_park(struct intel_rps *rps) */ rps->cur_freq = max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq); + + GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } void intel_rps_boost(struct i915_request *rq) @@ -788,6 +808,9 @@ void intel_rps_boost(struct i915_request *rq) !dma_fence_is_signaled_locked(&rq->fence)) { set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); + GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", + rq->fence.context, rq->fence.seqno); + if (!atomic_fetch_inc(&rps->num_waiters) && READ_ONCE(rps->cur_freq) < rps->boost_freq) schedule_work(&rps->work); @@ -883,6 +906,7 @@ static void gen6_rps_init(struct intel_rps *rps) static bool rps_reset(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + /* force a reset */ rps->power.mode = -1; rps->last_freq = -1; @@ -1210,11 +1234,17 @@ void intel_rps_enable(struct intel_rps *rps) if (!rps->enabled) return; - drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq); - drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq); + GT_TRACE(rps_to_gt(rps), + "min:%x, max:%x, freq:[%d, %d]\n", + rps->min_freq, rps->max_freq, + intel_gpu_freq(rps, rps->min_freq), + intel_gpu_freq(rps, rps->max_freq)); - drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq); - drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq); + GEM_BUG_ON(rps->max_freq < rps->min_freq); + GEM_BUG_ON(rps->idle_freq > rps->max_freq); + + GEM_BUG_ON(rps->efficient_freq < rps->min_freq); + GEM_BUG_ON(rps->efficient_freq > rps->max_freq); } static void gen6_rps_disable(struct intel_rps *rps) @@ -1482,6 +1512,12 @@ static void rps_work(struct work_struct *work) max = rps->max_freq_softlimit; if (client_boost) max = rps->max_freq; + + GT_TRACE(gt, + "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n", + pm_iir, yesno(client_boost), + adj, new_freq, min, max); + if (client_boost && new_freq < rps->boost_freq) { new_freq = rps->boost_freq; adj = 0; @@ -1556,6 +1592,8 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) if (unlikely(!events)) return; + GT_TRACE(gt, "irq events:%x\n", events); + gen6_gt_pm_mask_irq(gt, events); rps->pm_iir |= events; @@ -1571,6 +1609,8 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) if (events) { spin_lock(>->irq_lock); + GT_TRACE(gt, "irq events:%x\n", events); + gen6_gt_pm_mask_irq(gt, events); rps->pm_iir |= events; From 9c878557b1ebd23316b65e4099313a4b0c21eac4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Apr 2020 17:28:05 +0100 Subject: [PATCH 092/121] drm/i915/gt: Use the RPM config register to determine clk frequencies For many configuration details within RC6 and RPS we are programming intervals for the internal clocks. From gen11, these clocks are configuration via the RPM_CONFIG and so for convenience, we would like to convert to/from more natural units (ns). 
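The conversion itself is plain unit arithmetic against the GT clock frequency derived from RPM_CONFIG; a minimal sketch (the function names here are illustrative only, the real helpers are added in the new gt/intel_gt_clock_utils.[ch] in the diff below):

	#include <linux/math64.h>	/* div_u64(), mul_u32_u32() */
	#include <linux/time64.h>	/* NSEC_PER_SEC */

	/* clock ticks -> ns, given the GT clock frequency in Hz */
	static u64 clock_interval_to_ns(u32 clk_hz, u32 count)
	{
		return div_u64(mul_u32_u32(count, NSEC_PER_SEC), clk_hz);
	}

	/* ns -> clock ticks, rounding down */
	static u32 ns_to_clock_interval(u32 clk_hz, u32 ns)
	{
		return div_u64(mul_u32_u32(clk_hz, ns), NSEC_PER_SEC);
	}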
Signed-off-by: Chris Wilson Cc: Andi Shyti Cc: Mika Kuoppala Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200424162805.25920-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 55 +++++++--- drivers/gpu/drm/i915/gt/intel_gt.c | 3 + .../gpu/drm/i915/gt/intel_gt_clock_utils.c | 102 ++++++++++++++++++ .../gpu/drm/i915/gt/intel_gt_clock_utils.h | 27 +++++ drivers/gpu/drm/i915/gt/intel_gt_pm.c | 3 + drivers/gpu/drm/i915/gt/intel_gt_types.h | 9 +- drivers/gpu/drm/i915/gt/intel_rps.c | 36 ++++--- drivers/gpu/drm/i915/gt/selftest_rps.c | 7 +- drivers/gpu/drm/i915/i915_debugfs.c | 34 +++--- drivers/gpu/drm/i915/i915_reg.h | 25 ----- 11 files changed, 228 insertions(+), 74 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 6f112d8f80ca..ce24a4ee9591 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -91,6 +91,7 @@ gt-y += \ gt/intel_ggtt.o \ gt/intel_ggtt_fencing.o \ gt/intel_gt.o \ + gt/intel_gt_clock_utils.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \ gt/intel_gt_pm_irq.o \ diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c index aab30d908072..3d3ef62ed89f 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c @@ -10,6 +10,7 @@ #include "debugfs_gt_pm.h" #include "i915_drv.h" #include "intel_gt.h" +#include "intel_gt_clock_utils.h" #include "intel_llc.h" #include "intel_rc6.h" #include "intel_rps.h" @@ -268,7 +269,7 @@ static int frequency_show(struct seq_file *m, void *unused) yesno(rpmodectl & GEN6_RP_ENABLE)); seq_printf(m, "SW control enabled: %s\n", yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == - GEN6_RP_MEDIA_SW_MODE)); + GEN6_RP_MEDIA_SW_MODE)); vlv_punit_get(i915); freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); @@ -300,8 +301,9 @@ static int frequency_show(struct seq_file *m, void *unused) u32 rp_state_cap; u32 rpmodectl, rpinclimit, rpdeclimit; u32 rpstat, cagf, reqf; - u32 rpupei, rpcurup, rpprevup; - u32 rpdownei, rpcurdown, rpprevdown; + u32 rpcurupei, rpcurup, rpprevup; + u32 rpcurdownei, rpcurdown, rpprevdown; + u32 rpupei, rpupt, rpdownei, rpdownt; u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; int max_freq; @@ -337,9 +339,16 @@ static int frequency_show(struct seq_file *m, void *unused) rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; - rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; + rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; + + rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI); + rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); + + rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI); + rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); + cagf = intel_rps_read_actual_frequency(rps); intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); @@ -372,7 +381,7 @@ static int frequency_show(struct seq_file *m, void *unused) yesno(rpmodectl & GEN6_RP_ENABLE)); seq_printf(m, "SW control 
enabled: %s\n", yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == - GEN6_RP_MEDIA_SW_MODE)); + GEN6_RP_MEDIA_SW_MODE)); seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", pm_ier, pm_imr, pm_mask); @@ -394,23 +403,35 @@ static int frequency_show(struct seq_file *m, void *unused) seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); seq_printf(m, "CAGF: %dMHz\n", cagf); - seq_printf(m, "RP CUR UP EI: %d (%dus)\n", - rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei)); - seq_printf(m, "RP CUR UP: %d (%dus)\n", - rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup)); - seq_printf(m, "RP PREV UP: %d (%dus)\n", - rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup)); + seq_printf(m, "RP CUR UP EI: %d (%dns)\n", + rpcurupei, + intel_gt_pm_interval_to_ns(gt, rpcurupei)); + seq_printf(m, "RP CUR UP: %d (%dns)\n", + rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup)); + seq_printf(m, "RP PREV UP: %d (%dns)\n", + rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup)); seq_printf(m, "Up threshold: %d%%\n", rps->power.up_threshold); + seq_printf(m, "RP UP EI: %d (%dns)\n", + rpupei, intel_gt_pm_interval_to_ns(gt, rpupei)); + seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n", + rpupt, intel_gt_pm_interval_to_ns(gt, rpupt)); - seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", - rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei)); - seq_printf(m, "RP CUR DOWN: %d (%dus)\n", - rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown)); - seq_printf(m, "RP PREV DOWN: %d (%dus)\n", - rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown)); + seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n", + rpcurdownei, + intel_gt_pm_interval_to_ns(gt, rpcurdownei)); + seq_printf(m, "RP CUR DOWN: %d (%dns)\n", + rpcurdown, + intel_gt_pm_interval_to_ns(gt, rpcurdown)); + seq_printf(m, "RP PREV DOWN: %d (%dns)\n", + rpprevdown, + intel_gt_pm_interval_to_ns(gt, rpprevdown)); seq_printf(m, "Down threshold: %d%%\n", rps->power.down_threshold); + seq_printf(m, "RP DOWN EI: %d (%dns)\n", + rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei)); + seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n", + rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt)); max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 1c99cc72305a..d9cf8194c997 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "intel_context.h" #include "intel_gt.h" +#include "intel_gt_clock_utils.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" #include "intel_mocs.h" @@ -576,6 +577,8 @@ int intel_gt_init(struct intel_gt *gt) */ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + intel_gt_init_clock_frequency(gt); + err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? 
SZ_256K : SZ_4K); if (err) goto out_fw; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c new file mode 100644 index 000000000000..852a7d731b3b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_clock_utils.h" + +#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */ +#define MHZ_24 24000000 /* 24MHz, 83.333ns */ +#define MHZ_25 25000000 /* 25MHz, 80ns */ + +static u32 read_clock_frequency(const struct intel_gt *gt) +{ + if (INTEL_GEN(gt->i915) >= 11) { + u32 config; + + config = intel_uncore_read(gt->uncore, RPM_CONFIG0); + config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK; + config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; + + switch (config) { + case 0: return MHZ_24; + case 1: + case 2: return MHZ_19_2; + default: + case 3: return MHZ_25; + } + } else if (INTEL_GEN(gt->i915) >= 9) { + if (IS_GEN9_LP(gt->i915)) + return MHZ_19_2; + else + return MHZ_24; + } else { + return MHZ_25; + } +} + +void intel_gt_init_clock_frequency(struct intel_gt *gt) +{ + /* + * Note that on gen11+, the clock frequency may be reconfigured. + * We do not, and we assume nobody else does. + */ + gt->clock_frequency = read_clock_frequency(gt); + GT_TRACE(gt, + "Using clock frequency: %dkHz\n", + gt->clock_frequency / 1000); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +void intel_gt_check_clock_frequency(const struct intel_gt *gt) +{ + if (gt->clock_frequency != read_clock_frequency(gt)) { + dev_err(gt->i915->drm.dev, + "GT clock frequency changed, was %uHz, now %uHz!\n", + gt->clock_frequency, + read_clock_frequency(gt)); + } +} +#endif + +static u64 div_u64_roundup(u64 nom, u32 den) +{ + return div_u64(nom + den - 1, den); +} + +u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count) +{ + return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000), + gt->clock_frequency); +} + +u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count) +{ + return intel_gt_clock_interval_to_ns(gt, 16 * count); +} + +u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns) +{ + return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns), + 1000 * 1000 * 1000); +} + +u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns) +{ + u32 val; + + /* + * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS + * 8300) freezing up around GPU hangs. Looks as if even + * scheduling/timer interrupts start misbehaving if the RPS + * EI/thresholds are "bad", leading to a very sluggish or even + * frozen machine. 
+ */ + val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16); + if (IS_GEN(gt->i915, 6)) + val = roundup(val, 25); + + return val; +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h new file mode 100644 index 000000000000..f793c89f2cbd --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __INTEL_GT_CLOCK_UTILS_H__ +#define __INTEL_GT_CLOCK_UTILS_H__ + +#include + +struct intel_gt; + +void intel_gt_init_clock_frequency(struct intel_gt *gt); + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +void intel_gt_check_clock_frequency(const struct intel_gt *gt); +#else +static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {} +#endif + +u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count); +u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count); + +u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns); +u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns); + +#endif /* __INTEL_GT_CLOCK_UTILS_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 6bdb74892a1e..4c4c74ef4f21 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -12,6 +12,7 @@ #include "intel_context.h" #include "intel_engine_pm.h" #include "intel_gt.h" +#include "intel_gt_clock_utils.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" #include "intel_llc.h" @@ -138,6 +139,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force) wakeref = intel_runtime_pm_get(gt->uncore->rpm); intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + intel_gt_check_clock_frequency(gt); + /* * As we have just resumed the machine and woken the device up from * deep PCI sleep (presumably D3_cold), assume the HW has been reset diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 96890dd12b5f..d02ccb735e24 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -61,6 +61,7 @@ struct intel_gt { struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ + ktime_t last_init_time; struct intel_reset reset; /** @@ -72,14 +73,12 @@ struct intel_gt { */ intel_wakeref_t awake; + u32 clock_frequency; + struct intel_llc llc; struct intel_rc6 rc6; struct intel_rps rps; - ktime_t last_init_time; - - struct i915_vma *scratch; - spinlock_t irq_lock; u32 gt_imr; u32 pm_ier; @@ -97,6 +96,8 @@ struct intel_gt { * Reserved for exclusive use by the kernel. 
*/ struct i915_address_space *vm; + + struct i915_vma *scratch; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index ff088702c873..2ce006e58b4a 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -8,6 +8,7 @@ #include "i915_drv.h" #include "intel_gt.h" +#include "intel_gt_clock_utils.h" #include "intel_gt_irq.h" #include "intel_gt_pm_irq.h" #include "intel_rps.h" @@ -524,8 +525,8 @@ static u32 rps_limits(struct intel_rps *rps, u8 val) static void rps_set_power(struct intel_rps *rps, int new_power) { - struct intel_uncore *uncore = rps_to_uncore(rps); - struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_gt *gt = rps_to_gt(rps); + struct intel_uncore *uncore = gt->uncore; u32 threshold_up = 0, threshold_down = 0; /* in % */ u32 ei_up = 0, ei_down = 0; @@ -570,23 +571,25 @@ static void rps_set_power(struct intel_rps *rps, int new_power) /* When byt can survive without system hang with dynamic * sw freq adjustments, this restriction can be lifted. */ - if (IS_VALLEYVIEW(i915)) + if (IS_VALLEYVIEW(gt->i915)) goto skip_hw_write; - GT_TRACE(rps_to_gt(rps), + GT_TRACE(gt, "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n", new_power, threshold_up, ei_up, threshold_down, ei_down); - set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up)); + set(uncore, GEN6_RP_UP_EI, + intel_gt_ns_to_pm_interval(gt, ei_up * 1000)); set(uncore, GEN6_RP_UP_THRESHOLD, - GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100)); + intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10)); - set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down)); + set(uncore, GEN6_RP_DOWN_EI, + intel_gt_ns_to_pm_interval(gt, ei_down * 1000)); set(uncore, GEN6_RP_DOWN_THRESHOLD, - GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100)); + intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10)); set(uncore, GEN6_RP_CONTROL, - (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) | + (INTEL_GEN(gt->i915) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | GEN6_RP_MEDIA_HW_NORMAL_MODE | GEN6_RP_MEDIA_IS_GFX | GEN6_RP_ENABLE | @@ -923,11 +926,11 @@ static bool rps_reset(struct intel_rps *rps) /* See the Gen9_GT_PM_Programming_Guide doc for the below */ static bool gen9_rps_enable(struct intel_rps *rps) { - struct drm_i915_private *i915 = rps_to_i915(rps); - struct intel_uncore *uncore = rps_to_uncore(rps); + struct intel_gt *gt = rps_to_gt(rps); + struct intel_uncore *uncore = gt->uncore; /* Program defaults and thresholds for RPS */ - if (IS_GEN(i915, 9)) + if (IS_GEN(gt->i915, 9)) intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, GEN9_FREQUENCY(rps->rp1_freq)); @@ -1217,6 +1220,11 @@ void intel_rps_enable(struct intel_rps *rps) struct drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); + if (!HAS_RPS(i915)) + return; + + intel_gt_check_clock_frequency(rps_to_gt(rps)); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); if (IS_CHERRYVIEW(i915)) rps->enabled = chv_rps_enable(rps); @@ -1753,7 +1761,7 @@ static u32 read_cagf(struct intel_rps *rps) freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); vlv_punit_put(i915); } else { - freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1); + freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1); } return intel_rps_get_cagf(rps, freq); @@ -1761,7 +1769,7 @@ static u32 read_cagf(struct intel_rps *rps) u32 intel_rps_read_actual_frequency(struct intel_rps *rps) { - struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm; + struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; intel_wakeref_t wakeref; u32 freq = 0; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 9d9c8e0aa2e7..e13cbcb82825 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -9,6 +9,7 @@ #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" #include "intel_gpu_commands.h" +#include "intel_gt_clock_utils.h" #include "intel_gt_pm.h" #include "intel_rc6.h" #include "selftest_rps.h" @@ -787,7 +788,8 @@ static int __rps_up_interrupt(struct intel_rps *rps, } timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI); - timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout); + timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout); + timeout = DIV_ROUND_UP(timeout, 1000); sleep_for_ei(rps, timeout); GEM_BUG_ON(i915_request_completed(rq)); @@ -834,7 +836,8 @@ static int __rps_down_interrupt(struct intel_rps *rps, } timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI); - timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout); + timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout); + timeout = DIV_ROUND_UP(timeout, 1000); sleep_for_ei(rps, timeout); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2905bcff79cf..037e9e0275b6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,6 +32,7 @@ #include #include "gem/i915_gem_context.h" +#include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" @@ -926,21 +927,30 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); seq_printf(m, "CAGF: %dMHz\n", cagf); - seq_printf(m, "RP CUR UP EI: %d (%dus)\n", - rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei)); - seq_printf(m, "RP CUR UP: %d (%dus)\n", - rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup)); - 
seq_printf(m, "RP PREV UP: %d (%dus)\n" - rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup)); + seq_printf(m, "RP CUR UP EI: %d (%dns)\n", + rpupei, + intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei)); + seq_printf(m, "RP CUR UP: %d (%dns)\n", + rpcurup, + intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup)); + seq_printf(m, "RP PREV UP: %d (%dns)\n", + rpprevup, + intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup)); seq_printf(m, "Up threshold: %d%%\n", rps->power.up_threshold); - seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", - rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei)); - seq_printf(m, "RP CUR DOWN: %d (%dus)\n", - rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown)); - seq_printf(m, "RP PREV DOWN: %d (%dus)\n", - rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown)); + seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n", + rpdownei, + intel_gt_pm_interval_to_ns(&dev_priv->gt, + rpdownei)); + seq_printf(m, "RP CUR DOWN: %d (%dns)\n", + rpcurdown, + intel_gt_pm_interval_to_ns(&dev_priv->gt, + rpcurdown)); + seq_printf(m, "RP PREV DOWN: %d (%dns)\n", + rpprevdown, + intel_gt_pm_interval_to_ns(&dev_priv->gt, + rpprevdown)); seq_printf(m, "Down threshold: %d%%\n", rps->power.down_threshold); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4a1965467374..981b52aa5ed8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4015,31 +4015,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define BXT_RP_STATE_CAP _MMIO(0x138170) #define GEN9_RP_STATE_LIMITS _MMIO(0x138148) -/* - * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS - * 8300) freezing up around GPU hangs. Looks as if even - * scheduling/timer interrupts start misbehaving if the RPS - * EI/thresholds are "bad", leading to a very sluggish or even - * frozen machine. - */ -#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) -#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) -#define INTERVAL_0_833_US(us) (((us) * 6) / 5) -#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \ - (IS_GEN9_LP(dev_priv) ? \ - INTERVAL_0_833_US(us) : \ - INTERVAL_1_33_US(us)) : \ - INTERVAL_1_28_US(us)) - -#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100) -#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3) -#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6) -#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \ - (IS_GEN9_LP(dev_priv) ? \ - INTERVAL_0_833_TO_US(interval) : \ - INTERVAL_1_33_TO_US(interval)) : \ - INTERVAL_1_28_TO_US(interval)) - /* * Logical Context regs */ From 9669a5079995ddbe161243ca3e6cc86ca3ab6500 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Apr 2020 20:14:10 +0100 Subject: [PATCH 093/121] drm/i915: Drop rq->ring->vma peeking from error capture We only hold the active spinlock while dumping the error state, and this does not prevent another thread from retiring the request -- as it is quite possible that despite us capturing the current state, the GPU has completed the request. As such, it is dangerous to dereference state below the request as it may already be freed, and the simplest way to avoid the danger is to not include it in the error state.
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1788 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Andi Shyti Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200424191410.27570-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gpu_error.c | 5 ++--- drivers/gpu/drm/i915/i915_gpu_error.h | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 424ad975a360..4d54dba35302 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -467,14 +467,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m, if (!erq->seqno) return; - err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n", + err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n", prefix, erq->pid, erq->context, erq->seqno, test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "", test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags) ? "+" : "", erq->sched_attr.priority, - erq->start, erq->head, erq->tail); + erq->head, erq->tail); } static void error_print_context(struct drm_i915_error_state_buf *m, @@ -1213,7 +1213,6 @@ static void record_request(const struct i915_request *request, erq->context = request->fence.context; erq->seqno = request->fence.seqno; erq->sched_attr = request->sched.attr; - erq->start = i915_ggtt_offset(request->ring->vma); erq->head = request->head; erq->tail = request->tail; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 0d1f6c8ff355..fa2d82a6de04 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -50,7 +50,6 @@ struct i915_request_coredump { pid_t pid; u32 context; u32 seqno; - u32 start; u32 head; u32 tail; struct i915_sched_attr sched_attr; From 168c6d231b4b8f0c647eb2fda86e06b62cf68c7a Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Sat, 25 Apr 2020 00:48:38 +0300 Subject: [PATCH 094/121] drm/i915: Add engine scratch register to live_lrc_fixed General purpose registers are per engine and in a fixed location. Add to live_lrc_fixed. 
Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200424214841.28076-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_lrc.c | 12 ++++++++++++ drivers/gpu/drm/i915/gt/selftest_lrc.c | 5 +++++ 2 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 214ea2a34693..c1f30fe12d5d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -250,6 +250,18 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) return -1; } +static int lrc_ring_gpr0(const struct intel_engine_cs *engine) +{ + if (INTEL_GEN(engine->i915) >= 12) + return 0x74; + else if (INTEL_GEN(engine->i915) >= 9) + return 0x68; + else if (engine->class == RENDER_CLASS) + return 0xd8; + else + return -1; +} + static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine) { if (INTEL_GEN(engine->i915) >= 12) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index e964c1402d29..d10c38aec500 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -4613,6 +4613,11 @@ static int live_lrc_fixed(void *arg) CTX_TIMESTAMP - 1, "RING_CTX_TIMESTAMP" }, + { + i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)), + lrc_ring_gpr0(engine), + "RING_CS_GPR0" + }, { }, }, *t; u32 *hw; From 685d21096f6ceb44bda186dd42520f2a9d21d1aa Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Sat, 25 Apr 2020 02:05:46 +0300 Subject: [PATCH 095/121] drm/i915: Add per ctx batchbuffer wa for timestamp Restoration of a previous timestamp can collide with updating the timestamp, corrupting the value. Combat this issue by using an indirect ctx bb to modify the context image during the restore process: we preload the value into a scratch register, from which we then do the actual write with LRR. LRR is faster and thus less error prone, as the probability of the race drops. v2: tidying (Chris) v3: lrr for all engines v4: grp v5: reg bit v6: wa_bb_offset, virtual engines (Chris) References: HSDES#16010904313 Testcase: igt/i915_selftest/gt_lrc Suggested-by: Joseph Koston Cc: Chris Wilson Signed-off-by: Mika Kuoppala Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200424230546.30271-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_context_types.h | 2 + drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 3 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 133 ++++++++++++++++-- 3 files changed, 125 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 07cb83a0d017..e0da7bdcbf01 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -96,6 +96,8 @@ struct intel_context { /** sseu: Control eu/slice partitioning */ struct intel_sseu sseu; + + u8 wa_bb_page; /* if set, page num reserved for context workarounds */ }; #endif /* __INTEL_CONTEXT_TYPES__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index f04214a54f75..ee10122a511e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -138,7 +138,7 @@ */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) /* Gen11+. addr = base + (ctx_restore ?
offset & GENMASK(12,2) : offset) */ -#define MI_LRI_CS_MMIO (1<<19) +#define MI_LRI_LRM_CS_MMIO REG_BIT(19) #define MI_LRI_FORCE_POSTED (1<<12) #define MI_LOAD_REGISTER_IMM_MAX_REGS (126) #define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) @@ -156,6 +156,7 @@ #define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1) #define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2) #define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1) +#define MI_LRR_SOURCE_CS_MMIO REG_BIT(18) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index c1f30fe12d5d..a2fc4ad06deb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -314,6 +314,23 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) } } +static void +lrc_ring_setup_indirect_ctx(u32 *regs, + const struct intel_engine_cs *engine, + u32 ctx_bb_ggtt_addr, + u32 size) +{ + GEM_BUG_ON(!size); + GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES)); + GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1); + regs[lrc_ring_indirect_ptr(engine) + 1] = + ctx_bb_ggtt_addr | (size / CACHELINE_BYTES); + + GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1); + regs[lrc_ring_indirect_offset(engine) + 1] = + lrc_ring_indirect_offset_default(engine) << 6; +} + static u32 intel_context_get_runtime(const struct intel_context *ce) { /* @@ -613,7 +630,7 @@ static void set_offsets(u32 *regs, if (flags & POSTED) *regs |= MI_LRI_FORCE_POSTED; if (INTEL_GEN(engine->i915) >= 11) - *regs |= MI_LRI_CS_MMIO; + *regs |= MI_LRI_LRM_CS_MMIO; regs++; GEM_BUG_ON(!count); @@ -3187,6 +3204,94 @@ static void execlists_context_unpin(struct intel_context *ce) i915_gem_object_unpin_map(ce->state->obj); } +static u32 * +gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs) +{ + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | + MI_SRM_LRM_GLOBAL_GTT | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); + *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + + CTX_TIMESTAMP * sizeof(u32); + *cs++ = 0; + + *cs++ = MI_LOAD_REGISTER_REG | + MI_LRR_SOURCE_CS_MMIO | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); + *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0)); + + *cs++ = MI_LOAD_REGISTER_REG | + MI_LRR_SOURCE_CS_MMIO | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); + *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0)); + + return cs; +} + +static u32 * +gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs) +{ + GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1); + + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | + MI_SRM_LRM_GLOBAL_GTT | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); + *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + + (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32); + *cs++ = 0; + + return cs; +} + +static u32 * +gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) +{ + cs = gen12_emit_timestamp_wa(ce, cs); + cs = gen12_emit_restore_scratch(ce, cs); + + return cs; +} + +static inline u32 context_wa_bb_offset(const struct intel_context *ce) +{ + return PAGE_SIZE * ce->wa_bb_page; +} + +static u32 *context_indirect_bb(const struct intel_context *ce) +{ + void *ptr; + + GEM_BUG_ON(!ce->wa_bb_page); + + ptr = ce->lrc_reg_state; + ptr -= LRC_STATE_OFFSET; /* back to start of context image */ + ptr += context_wa_bb_offset(ce); + + return ptr; 
+} + +static void +setup_indirect_ctx_bb(const struct intel_context *ce, + const struct intel_engine_cs *engine, + u32 *(*emit)(const struct intel_context *, u32 *)) +{ + u32 * const start = context_indirect_bb(ce); + u32 *cs; + + cs = emit(ce, start); + GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs)); + while ((unsigned long)cs % CACHELINE_BYTES) + *cs++ = MI_NOOP; + + lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine, + i915_ggtt_offset(ce->state) + + context_wa_bb_offset(ce), + (cs - start) * sizeof(*cs)); +} + static void __execlists_update_reg_state(const struct intel_context *ce, const struct intel_engine_cs *engine, @@ -3210,6 +3315,12 @@ __execlists_update_reg_state(const struct intel_context *ce, i915_oa_init_reg_state(ce, engine); } + + if (ce->wa_bb_page) { + /* Mutually exclusive wrt to global indirect bb */ + GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size); + setup_indirect_ctx_bb(ce, engine, gen12_emit_indirect_ctx_xcs); + } } static int @@ -4737,7 +4848,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) return 0; } - static void init_common_reg_state(u32 * const regs, const struct intel_engine_cs *engine, const struct intel_ring *ring, @@ -4772,16 +4882,10 @@ static void init_wa_bb_reg_state(u32 * const regs, } if (wa_ctx->indirect_ctx.size) { - const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - - GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1); - regs[lrc_ring_indirect_ptr(engine) + 1] = - (ggtt_offset + wa_ctx->indirect_ctx.offset) | - (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); - - GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1); - regs[lrc_ring_indirect_offset(engine) + 1] = - lrc_ring_indirect_offset_default(engine) << 6; + lrc_ring_setup_indirect_ctx(regs, engine, + i915_ggtt_offset(wa_ctx->vma) + + wa_ctx->indirect_ctx.offset, + wa_ctx->indirect_ctx.size); } } @@ -4903,6 +5007,11 @@ static int __execlists_context_alloc(struct intel_context *ce, if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) context_size += I915_GTT_PAGE_SIZE; /* for redzone */ + if (INTEL_GEN(engine->i915) == 12) { + ce->wa_bb_page = context_size / PAGE_SIZE; + context_size += PAGE_SIZE; + } + ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); if (IS_ERR(ctx_obj)) return PTR_ERR(ctx_obj); From 1dd47b54baeac460f677a3c8bb347b2ffed56d4d Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Sat, 25 Apr 2020 00:48:40 +0300 Subject: [PATCH 096/121] drm/i915: Add live selftests for indirect ctx batchbuffers Indirect ctx batchbuffers are a hw feature whereby a batch can be run, by hardware, during the context restoration stage. The driver can set up a batchbuffer and also an offset into the context image. When the context image is marshalled from memory to registers, and the offset from the start of the context register state equals what the driver pre-determined, the batch will run. So one can manipulate the context restoration process at cacheline granularity, within some limitations, as the rudiments need to be in place before a batch can run. Add a selftest which writes the ring start register to a canary spot. This tests that the hardware runs the batchbuffer for the context in question.
v2: request wait fix, naming (Chris) v3: test order (Chris) v4: rebase Signed-off-by: Mika Kuoppala Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200424214841.28076-3-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 150 +++++++++++++++++++++++++ 1 file changed, 150 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index d10c38aec500..d47dcd754f65 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -5710,6 +5710,155 @@ static int live_lrc_isolation(void *arg) return err; } +static int indirect_ctx_submit_req(struct intel_context *ce) +{ + struct i915_request *rq; + int err = 0; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + + i915_request_put(rq); + + return err; +} + +#define CTX_BB_CANARY_OFFSET (3 * 1024) +#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32)) + +static u32 * +emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs) +{ + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | + MI_SRM_LRM_GLOBAL_GTT | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(RING_START(0)); + *cs++ = i915_ggtt_offset(ce->state) + + context_wa_bb_offset(ce) + + CTX_BB_CANARY_OFFSET; + *cs++ = 0; + + return cs; +} + +static void +indirect_ctx_bb_setup(struct intel_context *ce) +{ + u32 *cs = context_indirect_bb(ce); + + cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d; + + setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary); +} + +static bool check_ring_start(struct intel_context *ce) +{ + const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) - + LRC_STATE_OFFSET + context_wa_bb_offset(ce); + + if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START]) + return true; + + pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n", + ctx_bb[CTX_BB_CANARY_INDEX], + ce->lrc_reg_state[CTX_RING_START]); + + return false; +} + +static int indirect_ctx_bb_check(struct intel_context *ce) +{ + int err; + + err = indirect_ctx_submit_req(ce); + if (err) + return err; + + if (!check_ring_start(ce)) + return -EINVAL; + + return 0; +} + +static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine) +{ + struct intel_context *a, *b; + int err = 0; + + a = intel_context_create(engine); + b = intel_context_create(engine); + + err = intel_context_pin(a); + if (err) + return err; + + err = intel_context_pin(b); + if (err) { + intel_context_put(a); + return err; + } + + /* We use the already reserved extra page in context state */ + if (!a->wa_bb_page) { + GEM_BUG_ON(b->wa_bb_page); + GEM_BUG_ON(INTEL_GEN(engine->i915) == 12); + goto out; + } + + /* + * In order to test that our per context bb is truly per context, + * and executes at the intended spot on context restoring process, + * make the batch store the ring start value to memory. + * As ring start is restored apriori of starting the indirect ctx bb and + * as it will be different for each context, it fits to this purpose. 
+ */ + indirect_ctx_bb_setup(a); + indirect_ctx_bb_setup(b); + + err = indirect_ctx_bb_check(a); + if (err) + goto out; + + err = indirect_ctx_bb_check(b); +out: + intel_context_unpin(b); + intel_context_put(b); + + intel_context_unpin(a); + intel_context_put(a); + + return err; +} + +static int live_lrc_indirect_ctx_bb(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = __live_lrc_indirect_ctx_bb(engine); + intel_engine_pm_put(engine); + + if (igt_flush_test(gt->i915)) + err = -EIO; + + if (err) + break; + } + + return err; +} + static void garbage_reset(struct intel_engine_cs *engine, struct i915_request *rq) { @@ -5945,6 +6094,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915) SUBTEST(live_lrc_timestamp), SUBTEST(live_lrc_garbage), SUBTEST(live_pphwsp_runtime), + SUBTEST(live_lrc_indirect_ctx_bb), }; if (!HAS_LOGICAL_RING_CONTEXTS(i915)) From b8a1181122f7a3d878e8a7a87603cd247ab24082 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Sat, 25 Apr 2020 02:06:32 +0300 Subject: [PATCH 097/121] drm/i915: Use indirect ctx bb to mend CMD_BUF_CCTL Use indirect ctx bb to load cmd buffer control value from context image to avoid corruption. v2: add to lrc layout (Chris) v3: end to a cacheline (Chris) v4: add to lrc fixed (Chris) v5: value in offset+1 Testcase: igt/i915_selftest/gt_lrc Signed-off-by: Mika Kuoppala Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200424230632.30333-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_lrc.c | 112 ++++++++++++++++++++++++- drivers/gpu/drm/i915/gt/selftest_lrc.c | 5 ++ drivers/gpu/drm/i915/i915_reg.h | 1 + 3 files changed, 115 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a2fc4ad06deb..c8014c265ffb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -294,6 +294,19 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) return x + 2; } +static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) +{ + if (engine->class != RENDER_CLASS) + return -1; + + if (INTEL_GEN(engine->i915) >= 12) + return 0xb6; + else if (INTEL_GEN(engine->i915) >= 11) + return 0xaa; + else + return -1; +} + static u32 lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) { @@ -607,7 +620,7 @@ static void set_offsets(u32 *regs, #define REG16(x) \ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ (((x) >> 2) & 0x7f) -#define END(x) 0, (x) +#define END(total_state_size) 0, (total_state_size) { const u32 base = engine->mmio_base; @@ -1015,8 +1028,63 @@ static const u8 gen12_rcs_offsets[] = { NOP(6), LRI(1, 0), REG(0x0c8), + NOP(3 + 9 + 1), - END(80) + LRI(51, POSTED), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG(0x028), + REG(0x09c), + REG(0x0c0), + REG(0x178), + REG(0x17c), + REG16(0x358), + REG(0x170), + REG(0x150), + REG(0x154), + REG(0x158), + REG16(0x41c), + REG16(0x600), + REG16(0x604), + REG16(0x608), + REG16(0x60c), + REG16(0x610), + REG16(0x614), + REG16(0x618), + REG16(0x61c), + REG16(0x620), + REG16(0x624), + REG16(0x628), + REG16(0x62c), + REG16(0x630), + REG16(0x634), + REG16(0x638), + REG16(0x63c), + REG16(0x640), + REG16(0x644), + REG16(0x648), + REG16(0x64c), + REG16(0x650), + REG16(0x654), + REG16(0x658), + 
REG16(0x65c), + REG16(0x660), + REG16(0x664), + REG16(0x668), + REG16(0x66c), + REG16(0x670), + REG16(0x674), + REG16(0x678), + REG16(0x67c), + REG(0x068), + REG(0x084), + NOP(1), + + END(192) }; #undef END @@ -3246,6 +3314,38 @@ gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs) return cs; } +static u32 * +gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs) +{ + GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1); + + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | + MI_SRM_LRM_GLOBAL_GTT | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); + *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + + (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32); + *cs++ = 0; + + *cs++ = MI_LOAD_REGISTER_REG | + MI_LRR_SOURCE_CS_MMIO | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); + *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0)); + + return cs; +} + +static u32 * +gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) +{ + cs = gen12_emit_timestamp_wa(ce, cs); + cs = gen12_emit_cmd_buf_wa(ce, cs); + cs = gen12_emit_restore_scratch(ce, cs); + + return cs; +} + static u32 * gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) { @@ -3317,9 +3417,15 @@ __execlists_update_reg_state(const struct intel_context *ce, } if (ce->wa_bb_page) { + u32 *(*fn)(const struct intel_context *ce, u32 *cs); + + fn = gen12_emit_indirect_ctx_xcs; + if (ce->engine->class == RENDER_CLASS) + fn = gen12_emit_indirect_ctx_rcs; + /* Mutually exclusive wrt to global indirect bb */ GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size); - setup_indirect_ctx_bb(ce, engine, gen12_emit_indirect_ctx_xcs); + setup_indirect_ctx_bb(ce, engine, fn); } } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index d47dcd754f65..d3fa91aed7de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -4618,6 +4618,11 @@ static int live_lrc_fixed(void *arg) lrc_ring_gpr0(engine), "RING_CS_GPR0" }, + { + i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)), + lrc_ring_cmd_buf_cctl(engine), + "RING_CMD_BUF_CCTL" + }, { }, }, *t; u32 *hw; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 981b52aa5ed8..96d9f8853343 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2657,6 +2657,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */ #define RING_INSTPM(base) _MMIO((base) + 0xc0) #define RING_MI_MODE(base) _MMIO((base) + 0x9c) +#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84) #define INSTPS _MMIO(0x2070) /* 965+ only */ #define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */ #define ACTHD_I965 _MMIO(0x2074) From 9f4069b055d1508c833115df7493b6e0001e5c9b Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Sun, 26 Apr 2020 14:42:15 -0700 Subject: [PATCH 098/121] drm/i915: re-disable -Wframe-address The top level Makefile disables this warning. When building an i386_defconfig with Clang, this warning is triggered a whole bunch via includes of headers from perf. 
Link: https://github.com/ClangBuiltLinux/continuous-integration/pull/182 Signed-off-by: Nick Desaulniers Reviewed-by: Nathan Chancellor Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200426214215.139435-1-ndesaulniers@google.com --- drivers/gpu/drm/i915/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index ce24a4ee9591..21bb2fb5a6b8 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, sign-compare) subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) subdir-ccflags-y += $(call cc-disable-warning, uninitialized) +subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror # Fine grained warnings disable From 68ace460c5b2a96a82ee49ab0b589ceed8abd000 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 26 Apr 2020 10:42:31 +0100 Subject: [PATCH 099/121] drm/i915/execlists: Check preempt-timeout target before submit_ports We evaluate *active, which is a pointer into execlists->inflight[] during dequeue to decide how long a preempt-timeout we need to apply. However, as soon as we do the submit_ports, the HW may send its ACK interrupt causing us to promote execlists->pending[] tp execlists->inflight[], overwriting the value of *active. We know *active is only stable until we submit (as we only submit when there is no pending promotion). [ 16.102328] BUG: KCSAN: data-race in execlists_dequeue+0x1449/0x1600 [i915] [ 16.102356] [ 16.102375] race at unknown origin, with read to 0xffff8881e9500488 of 8 bytes by task 429 on cpu 1: [ 16.102780] execlists_dequeue+0x1449/0x1600 [i915] [ 16.103160] __execlists_submission_tasklet+0x48/0x60 [i915] [ 16.103540] execlists_submit_request+0x38e/0x3c0 [i915] [ 16.103940] submit_notify+0x8f/0xc0 [i915] [ 16.104308] __i915_sw_fence_complete+0x61/0x420 [i915] [ 16.104683] i915_sw_fence_complete+0x58/0x80 [i915] [ 16.105054] i915_sw_fence_commit+0x16/0x20 [i915] [ 16.105457] __i915_request_queue+0x60/0x70 [i915] [ 16.105843] i915_gem_do_execbuffer+0x2d6b/0x4230 [i915] [ 16.106227] i915_gem_execbuffer2_ioctl+0x2b0/0x580 [i915] [ 16.106257] drm_ioctl_kernel+0xe9/0x130 [ 16.106279] drm_ioctl+0x27d/0x45e [ 16.106311] ksys_ioctl+0x89/0xb0 [ 16.106336] __x64_sys_ioctl+0x42/0x60 [ 16.106370] do_syscall_64+0x6e/0x2c0 [ 16.106397] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200426094231.21995-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index c8014c265ffb..cbd04b74ae2a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2438,8 +2438,8 @@ done: clear_ports(port + 1, last_port - port); WRITE_ONCE(execlists->yield, -1); - execlists_submit_ports(engine); set_preempt_timeout(engine, *active); + execlists_submit_ports(engine); } else { skip_submit: ring_set_paused(engine, 0); From 2759e395358b2b909577928894f856ab75bea41a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 27 Apr 2020 10:30:38 +0100 Subject: [PATCH 100/121] drm/i915/gt: Check cacheline is valid before acquiring The hwsp_cacheline pointer from 
i915_request is very, very flimsy. The i915_request.timeline (and the hwsp_cacheline) are lost upon retiring (after an RCU grace). Therefore we need to confirm that once we have the right pointer for the cacheline, it is not in the process of being retired and disposed of before we attempt to acquire a reference to the cacheline. <3>[ 547.208237] BUG: KASAN: use-after-free in active_debug_hint+0x6a/0x70 [i915] <3>[ 547.208366] Read of size 8 at addr ffff88822a0d2710 by task gem_exec_parall/2536 <4>[ 547.208547] CPU: 3 PID: 2536 Comm: gem_exec_parall Tainted: G U 5.7.0-rc2-ged7a286b5d02d-kasan_117+ #1 <4>[ 547.208556] Hardware name: Dell Inc. XPS 13 9350/, BIOS 1.4.12 11/30/2016 <4>[ 547.208564] Call Trace: <4>[ 547.208579] dump_stack+0x96/0xdb <4>[ 547.208707] ? active_debug_hint+0x6a/0x70 [i915] <4>[ 547.208719] print_address_description.constprop.6+0x16/0x310 <4>[ 547.208841] ? active_debug_hint+0x6a/0x70 [i915] <4>[ 547.208963] ? active_debug_hint+0x6a/0x70 [i915] <4>[ 547.208975] __kasan_report+0x137/0x190 <4>[ 547.209106] ? active_debug_hint+0x6a/0x70 [i915] <4>[ 547.209127] kasan_report+0x32/0x50 <4>[ 547.209257] ? i915_gemfs_fini+0x40/0x40 [i915] <4>[ 547.209376] active_debug_hint+0x6a/0x70 [i915] <4>[ 547.209389] debug_print_object+0xa7/0x220 <4>[ 547.209405] ? lockdep_hardirqs_on+0x348/0x5f0 <4>[ 547.209426] debug_object_assert_init+0x297/0x430 <4>[ 547.209449] ? debug_object_free+0x360/0x360 <4>[ 547.209472] ? lock_acquire+0x1ac/0x8a0 <4>[ 547.209592] ? intel_timeline_read_hwsp+0x4f/0x840 [i915] <4>[ 547.209737] ? i915_active_acquire_if_busy+0x66/0x120 [i915] <4>[ 547.209861] i915_active_acquire_if_busy+0x66/0x120 [i915] <4>[ 547.209990] ? __live_alloc.isra.15+0xc0/0xc0 [i915] <4>[ 547.210005] ? rcu_read_lock_sched_held+0xd0/0xd0 <4>[ 547.210017] ? print_usage_bug+0x580/0x580 <4>[ 547.210153] intel_timeline_read_hwsp+0xbc/0x840 [i915] <4>[ 547.210284] __emit_semaphore_wait+0xd5/0x480 [i915] <4>[ 547.210415] ? i915_fence_get_timeline_name+0x110/0x110 [i915] <4>[ 547.210428] ? lockdep_hardirqs_on+0x348/0x5f0 <4>[ 547.210442] ? _raw_spin_unlock_irq+0x2a/0x40 <4>[ 547.210567] ? __await_execution.constprop.51+0x2e0/0x570 [i915] <4>[ 547.210706] i915_request_await_dma_fence+0x8f7/0xc70 [i915] Fixes: 85bedbf191e8 ("drm/i915/gt: Eliminate the trylock for reading a timeline's hwsp") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: # v5.6+ Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200427093038.29219-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_timeline.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 29a39e44fa36..e1fac1b38f27 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -544,6 +544,8 @@ int intel_timeline_read_hwsp(struct i915_request *from, rcu_read_lock(); cl = rcu_dereference(from->hwsp_cacheline); + if (i915_request_completed(from)) /* confirm cacheline is valid */ + goto unlock; if (unlikely(!i915_active_acquire_if_busy(&cl->active))) goto unlock; /* seqno wrapped and completed! 
*/ if (unlikely(i915_request_completed(from))) From 4243cd5388c82ae8942510f349b798c62f89825c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 27 Apr 2020 09:40:00 +0100 Subject: [PATCH 101/121] drm/i915/gt: Sanitize GT first We see that if the HW doesn't actually sleep, the HW may eat the poison we set in its write-only HWSP during sanitize: intel_gt_resume.part.8: 0000:00:02.0 __gt_unpark: 0000:00:02.0 gt_sanitize: 0000:00:02.0 force:yes process_csb: 0000:00:02.0 vcs0: cs-irq head=5, tail=90 process_csb: 0000:00:02.0 vcs0: csb[0]: status=0x5a5a5a5a:0x5a5a5a5a assert_pending_valid: Nothing pending for promotion! The CS TAIL pointer should have been reset by reset_csb_pointers(), so in this case it is likely that we have read back from the CPU cache and so we must clflush our control over that page. In doing so, push the sanitisation to the start of the GT sequence so that our poisoning is assuredly before we start talking to the HW. References: https://gitlab.freedesktop.org/drm/intel/-/issues/1794 Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200427084000.10999-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 3 ++- drivers/gpu/drm/i915/gt/intel_lrc.c | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 4c4c74ef4f21..5097786f4375 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -198,11 +198,12 @@ int intel_gt_resume(struct intel_gt *gt) * Only the kernel contexts should remain pinned over suspend, * allowing us to fixup the user contexts on their first pin. */ + gt_sanitize(gt, true); + intel_gt_pm_get(gt); intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); intel_rc6_sanitize(>->rc6); - gt_sanitize(gt, true); if (intel_gt_is_wedged(gt)) { err = -EIO; goto out_fw; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index cbd04b74ae2a..93a1b73ad96b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3931,6 +3931,9 @@ static void execlists_sanitize(struct intel_engine_cs *engine) * reset the value in the HWSP. */ intel_timeline_reset_seqno(engine->kernel_context->timeline); + + /* And scrub the dirty cachelines for the HWSP */ + clflush_cache_range(engine->status_page.addr, PAGE_SIZE); } static void enable_error_interrupt(struct intel_engine_cs *engine) From 6dc0d028f53945ca8de3e61a149f103f100d42df Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 27 Apr 2020 16:45:54 +0100 Subject: [PATCH 102/121] drm/i915/gt: Fix up clock frequency The bspec lists both the clock frequency and the effective interval. The interval corresponds to observed behaviour, so adjust the frequency to match. v2: Mika rightfully asked if we could measure the clock frequency from a selftest. 
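For reference, and not part of the change itself, the arithmetic behind the adjustment: the effective intervals listed in the bspec correspond to half the crystal rate for the 24 MHz and 25 MHz crystals,

	1 / (24 MHz / 2) = 1 / 12 MHz ≈ 83.333 ns
	1 / (25 MHz / 2) = 1 / 12.5 MHz = 80 ns

which matches the 83.333 ns and 80 ns intervals already noted next to the defines, while the 19.2 MHz case (52.083 ns) is unchanged.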
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200427154554.12736-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/gt/intel_gt_clock_utils.c | 12 +- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 1 + drivers/gpu/drm/i915/gt/selftest_rps.c | 139 ++++++++++++++++++ drivers/gpu/drm/i915/gt/selftest_rps.h | 1 + 4 files changed, 147 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index 852a7d731b3b..999079686846 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -7,9 +7,9 @@ #include "intel_gt.h" #include "intel_gt_clock_utils.h" +#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */ +#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */ #define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */ -#define MHZ_24 24000000 /* 24MHz, 83.333ns */ -#define MHZ_25 25000000 /* 25MHz, 80ns */ static u32 read_clock_frequency(const struct intel_gt *gt) { @@ -21,19 +21,19 @@ static u32 read_clock_frequency(const struct intel_gt *gt) config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; switch (config) { - case 0: return MHZ_24; + case 0: return MHZ_12; case 1: case 2: return MHZ_19_2; default: - case 3: return MHZ_25; + case 3: return MHZ_12_5; } } else if (INTEL_GEN(gt->i915) >= 9) { if (IS_GEN9_LP(gt->i915)) return MHZ_19_2; else - return MHZ_24; + return MHZ_12; } else { - return MHZ_25; + return MHZ_12_5; } } diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index e02fdec58826..242181a5214c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -53,6 +53,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_rc6_manual), + SUBTEST(live_rps_clock_interval), SUBTEST(live_rps_control), SUBTEST(live_rps_frequency_cs), SUBTEST(live_rps_frequency_srm), diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index e13cbcb82825..181b29fa5b58 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -208,6 +208,145 @@ static void show_pstate_limits(struct intel_rps *rps) } } +int live_rps_clock_interval(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_rps *rps = >->rps; + void (*saved_work)(struct work_struct *wrk); + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + if (!rps->enabled) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + intel_gt_pm_wait_for_idle(gt); + saved_work = rps->work.func; + rps->work.func = dummy_rps_work; + + intel_gt_pm_get(gt); + intel_rps_disable(>->rps); + + intel_gt_check_clock_frequency(gt); + + for_each_engine(engine, gt, id) { + unsigned long saved_heartbeat; + struct i915_request *rq; + ktime_t dt; + u32 cycles; + + if (!intel_engine_can_store_dword(engine)) + continue; + + saved_heartbeat = engine_heartbeat_disable(engine); + + rq = igt_spinner_create_request(&spin, + engine->kernel_context, + MI_NOOP); + if (IS_ERR(rq)) { + engine_heartbeat_enable(engine, saved_heartbeat); + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + pr_err("%s: RPS spinner did not start\n", + engine->name); + igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); + 
intel_gt_set_wedged(engine->gt); + err = -EIO; + break; + } + + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + + intel_uncore_write_fw(gt->uncore, GEN6_RP_CUR_UP_EI, 0); + + /* Set the evaluation interval to infinity! */ + intel_uncore_write_fw(gt->uncore, + GEN6_RP_UP_EI, 0xffffffff); + intel_uncore_write_fw(gt->uncore, + GEN6_RP_UP_THRESHOLD, 0xffffffff); + + intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, + GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG); + + if (wait_for(intel_uncore_read_fw(gt->uncore, + GEN6_RP_CUR_UP_EI), + 10)) { + /* Just skip the test; assume lack of HW support */ + pr_notice("%s: rps evalution interval not ticking\n", + engine->name); + err = -ENODEV; + } else { + preempt_disable(); + dt = ktime_get(); + cycles = -intel_uncore_read_fw(gt->uncore, + GEN6_RP_CUR_UP_EI); + udelay(1000); + dt = ktime_sub(ktime_get(), dt); + cycles += intel_uncore_read_fw(gt->uncore, + GEN6_RP_CUR_UP_EI); + preempt_enable(); + } + + intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, 0); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + + igt_spinner_end(&spin); + engine_heartbeat_enable(engine, saved_heartbeat); + + if (err == 0) { + u64 time = intel_gt_pm_interval_to_ns(gt, cycles); + u32 expected = + intel_gt_ns_to_pm_interval(gt, ktime_to_ns(dt)); + + pr_info("%s: rps counted %d C0 cycles [%lldns] in %lldns [%d cycles], using GT clock frequency of %uKHz\n", + engine->name, cycles, time, ktime_to_ns(dt), expected, + gt->clock_frequency / 1000); + + if (10 * time < 9 * ktime_to_ns(dt) || + 10 * time > 11 * ktime_to_ns(dt)) { + pr_err("%s: rps clock time does not match walltime!\n", + engine->name); + err = -EINVAL; + } + + if (10 * expected < 9 * cycles || + 10 * expected > 11 * cycles) { + pr_err("%s: walltime does not match rps clock ticks!\n", + engine->name); + err = -EINVAL; + } + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + + break; /* once is enough */ + } + + intel_rps_enable(>->rps); + intel_gt_pm_put(gt); + + igt_spinner_fini(&spin); + + intel_gt_pm_wait_for_idle(gt); + rps->work.func = saved_work; + + if (err == -ENODEV) /* skipped, don't report a fail */ + err = 0; + + return err; +} + int live_rps_control(void *arg) { struct intel_gt *gt = arg; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h index 76c4b19553e6..6e82a631cfa1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.h +++ b/drivers/gpu/drm/i915/gt/selftest_rps.h @@ -7,6 +7,7 @@ #define SELFTEST_RPS_H int live_rps_control(void *arg); +int live_rps_clock_interval(void *arg); int live_rps_frequency_cs(void *arg); int live_rps_frequency_srm(void *arg); int live_rps_power(void *arg); From 869129ee0c624a78c74e50b51635e183196cd2c6 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Fri, 24 Apr 2020 16:14:23 -0700 Subject: [PATCH 103/121] drm/i915: Use proper fault mask in interrupt postinstall too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IRQ postinstall handling had open-coded pipe fault mask selection that never got updated for gen11. Switch it to use gen8_de_pipe_fault_mask() to ensure we don't miss updates for new platforms. 
Cc: José Roberto de Souza Fixes: d506a65d56fd ("drm/i915: Catch GTT fault errors for gen11+ planes") Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20200424231423.4065231-1-matthew.d.roper@intel.com Reviewed-by: Ville Syrjälä --- drivers/gpu/drm/i915/i915_irq.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1502ab44f1a5..bd722d0650c8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | + GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; @@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_misc_masked |= GEN8_DE_MISC_GSE; if (INTEL_GEN(dev_priv) >= 9) { - de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; if (IS_GEN9_LP(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS; - } else { - de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; } if (INTEL_GEN(dev_priv) >= 11) From d631461d5cc3d6dd3b22bdfb810f668bc1935179 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 28 Apr 2020 09:49:20 +0100 Subject: [PATCH 104/121] drm/i915/gt: fix spelling mistake "evalution" -> "evaluation" There is a spelling mistaking in a pr_notice message. Fix it. Signed-off-by: Colin Ian King Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200428084920.1035125-1-colin.king@canonical.com --- drivers/gpu/drm/i915/gt/selftest_rps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 181b29fa5b58..6ce7003bf92f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -282,7 +282,7 @@ int live_rps_clock_interval(void *arg) GEN6_RP_CUR_UP_EI), 10)) { /* Just skip the test; assume lack of HW support */ - pr_notice("%s: rps evalution interval not ticking\n", + pr_notice("%s: rps evaluation interval not ticking\n", engine->name); err = -ENODEV; } else { From 96a4faf524fcfbb6c1f2ce91238e81062895ec09 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Apr 2020 12:43:07 +0100 Subject: [PATCH 105/121] drm/i915/selftests: Tweak the tolerance for clock ticks to 12.5% Give a small bump for our tolerance on comparing the expected vs measured clock ticks/time from 10% to 12.5% to accommodate a bad result on Sandybridge that was off by 10.3%. Hopefully, that is the worst we will see. 
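As a quick check of the arithmetic, a standalone snippet (the 1ms walltime is invented; the ~10.3% skew is the figure quoted above) showing that the relaxed comparison accepts what the old ±10% bounds rejected:

#include <stdbool.h>
#include <stdio.h>

/* Bounds used by the selftest before this patch (±10%). */
static bool ok_old(long long time, long long dt)
{
        return !(10 * time < 9 * dt || 10 * time > 11 * dt);
}

/* Relaxed bounds introduced by this patch. */
static bool ok_new(long long time, long long dt)
{
        return !(10 * time < 8 * dt || 8 * time > 10 * dt);
}

int main(void)
{
        long long dt = 1000000;         /* 1ms of walltime, invented for the example */
        long long time = 897000;        /* ~10.3% low, as in the Sandybridge report */

        printf("old bounds pass: %d, new bounds pass: %d\n",
               ok_old(time, dt), ok_new(time, dt));
        return 0;
}
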
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1802 Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200428114307.5153-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_rps.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 6ce7003bf92f..21338c1980de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -312,15 +312,15 @@ int live_rps_clock_interval(void *arg) engine->name, cycles, time, ktime_to_ns(dt), expected, gt->clock_frequency / 1000); - if (10 * time < 9 * ktime_to_ns(dt) || - 10 * time > 11 * ktime_to_ns(dt)) { + if (10 * time < 8 * ktime_to_ns(dt) || + 8 * time > 10 * ktime_to_ns(dt)) { pr_err("%s: rps clock time does not match walltime!\n", engine->name); err = -EINVAL; } - if (10 * expected < 9 * cycles || - 10 * expected > 11 * cycles) { + if (10 * expected < 8 * cycles || + 8 * expected > 10 * cycles) { pr_err("%s: walltime does not match rps clock ticks!\n", engine->name); err = -EINVAL; From f9d77427c3fd27fd69ac54067f968784dc725eb6 Mon Sep 17 00:00:00 2001 From: Matt Atwood Date: Wed, 15 Apr 2020 15:35:35 -0400 Subject: [PATCH 106/121] drm/i915/tgl: Wa_14011059788 Reflect recent Bspec changes v2: fix whitespace, typo Signed-off-by: Matt Atwood Reviewed-by: Radhakrishna Sripada Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20200415193535.14597-1-matthew.s.atwood@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6f40bfee7304..bfb180fe8047 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6880,6 +6880,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | TGL_VRH_GATING_DIS); + + /* Wa_14011059788:tgl */ + intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN, + 0, DFR_DISABLE); } static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) From 2632f174a2e1a5fd40a70404fa8ccfd0b1f79ebd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Apr 2020 19:47:49 +0100 Subject: [PATCH 107/121] drm/i915/execlists: Avoid reusing the same logical CCID The bspec is confusing on the nature of the upper 32bits of the LRC descriptor. Once upon a time, it said that it uses the upper 32b to decide if it should perform a lite-restore, and so we must ensure that each unique context submitted to HW is given a unique CCID [for the duration of it being on the HW]. Currently, this is achieved by using a small circular tag, and assigning every context submitted to HW a new id. However, this tag is being cleared on repinning an inflight context such that we end up re-using the 0 tag for multiple contexts. To avoid accidentally clearing the CCID in the upper 32bits of the LRC descriptor, split the descriptor into two dwords so we can update the GGTT address separately from the CCID. 
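For readers following the layout, a minimal sketch of the dword split (the union mirrors the new intel_context.lrc field; the address and CCID values are invented, and little-endian layout is assumed as on the targeted hardware):

#include <stdint.h>
#include <stdio.h>

/* Low dword: GGTT address plus flags, set once when the context is pinned.
 * High dword: CCID, rewritten on every submission. */
union lrc_desc {
        struct {
                uint32_t lrca;
                uint32_t ccid;
        };
        uint64_t desc;
};

int main(void)
{
        union lrc_desc lrc = { .desc = 0 };

        lrc.lrca = 0x12345000u | 0x3u;  /* invented address | flags */

        /* Updating the CCID can no longer clobber the address half. */
        lrc.ccid = 7u << 5;
        printf("descriptor written to HW: %#llx\n", (unsigned long long)lrc.desc);
        return 0;
}
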
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1796 Fixes: 2935ed5339c4 ("drm/i915: Remove logical HW ID") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: # v5.5+ Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200428184751.11257-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context_types.h | 8 ++- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine_types.h | 5 ++ drivers/gpu/drm/i915/gt/intel_lrc.c | 57 ++++++++----------- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +- drivers/gpu/drm/i915/gvt/scheduler.c | 4 +- drivers/gpu/drm/i915/i915_perf.c | 3 +- 7 files changed, 43 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index e0da7bdcbf01..4954b0df4864 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -69,7 +69,13 @@ struct intel_context { #define CONTEXT_NOPREEMPT 7 u32 *lrc_reg_state; - u64 lrc_desc; + union { + struct { + u32 lrca; + u32 ccid; + }; + u64 desc; + } lrc; u32 tag; /* cookie passed to HW to track this context on submission */ /* Time on GPU as tracked by the hw. */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index b1f8527f02c8..7c3cb5aedfdf 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1425,7 +1425,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, len = scnprintf(hdr, sizeof(hdr), "\t\tActive[%d]: ccid:%08x, ", (int)(port - execlists->active), - upper_32_bits(rq->context->lrc_desc)); + rq->context->lrc.ccid); len += print_ring(hdr + len, sizeof(hdr) - len, rq); scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); print_request(m, rq, hdr); @@ -1437,7 +1437,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, len = scnprintf(hdr, sizeof(hdr), "\t\tPending[%d]: ccid:%08x, ", (int)(port - execlists->pending), - upper_32_bits(rq->context->lrc_desc)); + rq->context->lrc.ccid); len += print_ring(hdr + len, sizeof(hdr) - len, rq); scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); print_request(m, rq, hdr); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index bf395227c99f..470bdc73220a 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -156,6 +156,11 @@ struct intel_engine_execlists { */ struct i915_priolist default_priolist; + /** + * @ccid: identifier for contexts submitted to this engine + */ + u32 ccid; + /** * @yield: CCID at the time of the last semaphore-wait interrupt. * diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 93a1b73ad96b..7d56207276d5 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -573,10 +573,10 @@ assert_priority_queue(const struct i915_request *prev, * engine info, SW context ID and SW counter need to form a unique number * (Context ID) per lrc. 
*/ -static u64 +static u32 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) { - u64 desc; + u32 desc; desc = INTEL_LEGACY_32B_CONTEXT; if (i915_vm_is_4lvl(ce->vm)) @@ -587,21 +587,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) if (IS_GEN(engine->i915, 8)) desc |= GEN8_CTX_L3LLC_COHERENT; - desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */ - /* - * The following 32bits are copied into the OA reports (dword 2). - * Consider updating oa_get_render_ctx_id in i915_perf.c when changing - * anything below. - */ - if (INTEL_GEN(engine->i915) >= 11) { - desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; - /* bits 48-53 */ - - desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; - /* bits 61-63 */ - } - - return desc; + return i915_ggtt_offset(ce->state) | desc; } static inline unsigned int dword_in_page(void *addr) @@ -1353,7 +1339,7 @@ static void reset_active(struct i915_request *rq, __execlists_update_reg_state(ce, engine, head); /* We've switched away, so this should be a no-op, but intent matters */ - ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; } static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) @@ -1401,18 +1387,19 @@ __execlists_schedule_in(struct i915_request *rq) if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) execlists_check_context(ce, engine); - ce->lrc_desc &= ~GENMASK_ULL(47, 37); if (ce->tag) { /* Use a fixed tag for OA and friends */ - ce->lrc_desc |= (u64)ce->tag << 32; + ce->lrc.ccid = ce->tag; } else { /* We don't need a strict matching tag, just different values */ - ce->lrc_desc |= - (u64)(++engine->context_tag % NUM_CONTEXT_TAG) << - GEN11_SW_CTX_ID_SHIFT; + ce->lrc.ccid = + (++engine->context_tag % NUM_CONTEXT_TAG) << + (GEN11_SW_CTX_ID_SHIFT - 32); BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); } + ce->lrc.ccid |= engine->execlists.ccid; + __intel_gt_pm_get(engine->gt); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); intel_engine_context_in(engine); @@ -1511,7 +1498,7 @@ execlists_schedule_out(struct i915_request *rq) static u64 execlists_update_context(struct i915_request *rq) { struct intel_context *ce = rq->context; - u64 desc = ce->lrc_desc; + u64 desc = ce->lrc.desc; u32 tail, prev; /* @@ -1550,7 +1537,7 @@ static u64 execlists_update_context(struct i915_request *rq) */ wmb(); - ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; + ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE; return desc; } @@ -1571,8 +1558,9 @@ dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq) if (!rq) return ""; - snprintf(buf, buflen, "%s%llx:%lld%s prio %d", + snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d", prefix, + rq->context->lrc.ccid, rq->fence.context, rq->fence.seqno, i915_request_completed(rq) ? "!" : i915_request_started(rq) ? "*" : @@ -1948,7 +1936,7 @@ timeslice_yield(const struct intel_engine_execlists *el, * safe, yield if it might be stuck -- it will be given a fresh * timeslice in the near future. 
*/ - return upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield); + return rq->context->lrc.ccid == READ_ONCE(el->yield); } static bool @@ -2975,7 +2963,7 @@ active_context(struct intel_engine_cs *engine, u32 ccid) */ for (port = el->active; (rq = *port); port++) { - if (upper_32_bits(rq->context->lrc_desc) == ccid) { + if (rq->context->lrc.ccid == ccid) { ENGINE_TRACE(engine, "ccid found at active:%zd\n", port - el->active); @@ -2984,7 +2972,7 @@ active_context(struct intel_engine_cs *engine, u32 ccid) } for (port = el->pending; (rq = *port); port++) { - if (upper_32_bits(rq->context->lrc_desc) == ccid) { + if (rq->context->lrc.ccid == ccid) { ENGINE_TRACE(engine, "ccid found at pending:%zd\n", port - el->pending); @@ -3444,7 +3432,7 @@ __execlists_context_pin(struct intel_context *ce, if (IS_ERR(vaddr)) return PTR_ERR(vaddr); - ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; + ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET; __execlists_update_reg_state(ce, engine, ce->ring->tail); @@ -3473,7 +3461,7 @@ static void execlists_context_reset(struct intel_context *ce) ce, ce->engine, ce->ring, true); __execlists_update_reg_state(ce, ce->engine, ce->ring->tail); - ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; } static const struct intel_context_ops execlists_context_ops = { @@ -4184,7 +4172,7 @@ out_replay: head, ce->ring->tail); __execlists_reset_reg_state(ce, engine); __execlists_update_reg_state(ce, engine, head); - ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ + ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ unwind: /* Push back any incomplete requests for replay after the reset. */ @@ -4950,6 +4938,11 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) else execlists->csb_size = GEN11_CSB_ENTRIES; + if (INTEL_GEN(engine->i915) >= 11) { + execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); + execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); + } + /* Finally, take ownership and responsibility for cleanup! 
*/ engine->sanitize = execlists_sanitize; engine->release = execlists_release; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index fe7778c28d2d..aa6d56e25a10 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc, static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = lower_32_bits(rq->context->lrc_desc); + u32 ctx_desc = rq->context->lrc.ccid; u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); guc_wq_item_append(guc, engine->guc_id, ctx_desc, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 2f5c59111821..38234073e0fc 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -290,7 +290,7 @@ static void shadow_context_descriptor_update(struct intel_context *ce, struct intel_vgpu_workload *workload) { - u64 desc = ce->lrc_desc; + u64 desc = ce->lrc.desc; /* * Update bits 0-11 of the context descriptor which includes flags @@ -300,7 +300,7 @@ shadow_context_descriptor_update(struct intel_context *ce, desc |= (u64)workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - ce->lrc_desc = desc; + ce->lrc.desc = desc; } static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index dec1b33e4da8..04ad21960688 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1263,8 +1263,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) * dropped by GuC. They won't be part of the context * ID in the OA reports, so squash those lower bits. */ - stream->specific_ctx_id = - lower_32_bits(ce->lrc_desc) >> 12; + stream->specific_ctx_id = ce->lrc.lrca >> 12; /* * GuC uses the top bit to signal proxy submission, so From 5c4a53e3b1cbc38d0906e382f1037290658759bb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Apr 2020 19:47:50 +0100 Subject: [PATCH 108/121] drm/i915/execlists: Track inflight CCID The presumption is that by using a circular counter that is twice as large as the maximum ELSP submission, we would never reuse the same CCID for two inflight contexts. However, if we continually preempt an active context such that it always remains inflight, it can be resubmitted with an arbitrary number of paired contexts. As each of its paired contexts will use a new CCID, eventually it will wrap and submit two ELSP with the same CCID. Rather than use a simple circular counter, switch over to a small bitmap of inflight ids so we can avoid reusing one that is still potentially active. 
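The bitmap idea in isolation, as a hedged userspace sketch (a 31-bit tag_pool and plain integer ops stand in for engine->context_tag and its atomic set_bit/clear_bit):

#include <stdio.h>
#include <strings.h>    /* ffs() */

static unsigned int tag_pool = 0x7fffffffu;     /* every tag initially free */

static int get_tag(void)
{
        int tag = ffs((int)tag_pool);   /* lowest free bit, 1-based; 0 if none */

        if (tag)
                tag_pool &= ~(1u << (tag - 1)); /* mark it inflight */
        return tag;
}

static void put_tag(int tag)
{
        tag_pool |= 1u << (tag - 1);    /* release on schedule-out */
}

int main(void)
{
        int a = get_tag(), b = get_tag();

        printf("inflight tags stay distinct: %d %d\n", a, b);
        put_tag(a);
        printf("a tag is reused only after release: %d\n", get_tag());
        return 0;
}

The key difference from the wrapping counter is that a value can only be handed out again once its previous owner has actually been scheduled out and returned it.
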
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1796 Fixes: 2935ed5339c4 ("drm/i915: Remove logical HW ID") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: # v5.5+ Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200428184751.11257-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 3 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 29 +++++++++++++++----- drivers/gpu/drm/i915/i915_perf.c | 3 +- drivers/gpu/drm/i915/selftests/i915_vma.c | 2 +- 4 files changed, 25 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 470bdc73220a..cfe4feaee982 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -309,8 +309,7 @@ struct intel_engine_cs { u32 context_size; u32 mmio_base; - unsigned int context_tag; -#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS) + unsigned long context_tag; struct rb_node uabi_node; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7d56207276d5..05e2bb483db3 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1389,13 +1389,17 @@ __execlists_schedule_in(struct i915_request *rq) if (ce->tag) { /* Use a fixed tag for OA and friends */ + GEM_BUG_ON(ce->tag <= BITS_PER_LONG); ce->lrc.ccid = ce->tag; } else { /* We don't need a strict matching tag, just different values */ - ce->lrc.ccid = - (++engine->context_tag % NUM_CONTEXT_TAG) << - (GEN11_SW_CTX_ID_SHIFT - 32); - BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); + unsigned int tag = ffs(engine->context_tag); + + GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG); + clear_bit(tag - 1, &engine->context_tag); + ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32); + + BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID); } ce->lrc.ccid |= engine->execlists.ccid; @@ -1439,7 +1443,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce) static inline void __execlists_schedule_out(struct i915_request *rq, - struct intel_engine_cs * const engine) + struct intel_engine_cs * const engine, + unsigned int ccid) { struct intel_context * const ce = rq->context; @@ -1457,6 +1462,14 @@ __execlists_schedule_out(struct i915_request *rq, i915_request_completed(rq)) intel_engine_add_retire(engine, ce->timeline); + ccid >>= GEN11_SW_CTX_ID_SHIFT - 32; + ccid &= GEN12_MAX_CONTEXT_HW_ID; + if (ccid < BITS_PER_LONG) { + GEM_BUG_ON(ccid == 0); + GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag)); + set_bit(ccid - 1, &engine->context_tag); + } + intel_context_update_runtime(ce); intel_engine_context_out(engine); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); @@ -1482,15 +1495,17 @@ execlists_schedule_out(struct i915_request *rq) { struct intel_context * const ce = rq->context; struct intel_engine_cs *cur, *old; + u32 ccid; trace_i915_request_out(rq); + ccid = rq->context->lrc.ccid; old = READ_ONCE(ce->inflight); do cur = ptr_unmask_bits(old, 2) ? 
ptr_dec(old) : NULL; while (!try_cmpxchg(&ce->inflight, &old, cur)); if (!cur) - __execlists_schedule_out(rq, old); + __execlists_schedule_out(rq, old, ccid); i915_request_put(rq); } @@ -3990,7 +4005,7 @@ static void enable_execlists(struct intel_engine_cs *engine) enable_error_interrupt(engine); - engine->context_tag = 0; + engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); } static bool unexpected_starting_state(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 04ad21960688..c533f569dd42 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1280,11 +1280,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); /* * Pick an unused context id - * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts + * 0 - BITS_PER_LONG are used by other contexts * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context */ stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); - BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG); break; } diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 58b5f40a07dd..af89c7fc8f59 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -173,7 +173,7 @@ static int igt_vma_create(void *arg) } nc = 0; - for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) { + for_each_prime_number(num_ctx, 2 * BITS_PER_LONG) { for (; nc < num_ctx; nc++) { ctx = mock_context(i915, "mock"); if (!ctx) From f6a7c21c991062da9ac93d547f07a5368c8d8d57 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Apr 2020 19:47:51 +0100 Subject: [PATCH 109/121] drm/i915/execlists: Verify we don't submit two identical CCIDs Check that we do not submit two contexts into ELSP with the same CCID [upper portion of the descriptor]. 
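The new assertion amounts to a consecutive-duplicate scan over the pending ports; a toy standalone version with fabricated CCID values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pending_ccids_valid(const uint32_t *ccid, int nports)
{
        uint32_t prev = (uint32_t)-1;   /* sentinel, matches no CCID here */

        for (int i = 0; i < nports; i++) {
                if (ccid[i] == prev)
                        return false;   /* same CCID submitted back to back */
                prev = ccid[i];
        }
        return true;
}

int main(void)
{
        uint32_t good[] = { 0x20, 0x40 };
        uint32_t bad[]  = { 0x20, 0x20 };

        printf("good: %d, bad: %d\n",
               pending_ccids_valid(good, 2), pending_ccids_valid(bad, 2));
        return 0;
}
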
References: https://gitlab.freedesktop.org/drm/intel/-/issues/1793 Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200428184751.11257-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 37 ++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 05e2bb483db3..90acc3d2440b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1612,9 +1612,12 @@ static __maybe_unused bool assert_pending_valid(const struct intel_engine_execlists *execlists, const char *msg) { + struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); struct i915_request * const *port, *rq; struct intel_context *ce = NULL; bool sentinel = false; + u32 ccid = -1; trace_ports(execlists, msg, execlists->pending); @@ -1623,13 +1626,14 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, return true; if (!execlists->pending[0]) { - GEM_TRACE_ERR("Nothing pending for promotion!\n"); + GEM_TRACE_ERR("%s: Nothing pending for promotion!\n", + engine->name); return false; } if (execlists->pending[execlists_num_ports(execlists)]) { - GEM_TRACE_ERR("Excess pending[%d] for promotion!\n", - execlists_num_ports(execlists)); + GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n", + engine->name, execlists_num_ports(execlists)); return false; } @@ -1641,20 +1645,31 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, GEM_BUG_ON(!i915_request_is_active(rq)); if (ce == rq->context) { - GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n", + GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n", + engine->name, ce->timeline->fence_context, port - execlists->pending); return false; } ce = rq->context; + if (ccid == ce->lrc.ccid) { + GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n", + engine->name, + ccid, ce->timeline->fence_context, + port - execlists->pending); + return false; + } + ccid = ce->lrc.ccid; + /* * Sentinels are supposed to be lonely so they flush the * current exection off the HW. Check that they are the * only request in the pending submission. 
*/ if (sentinel) { - GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n", + GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n", + engine->name, ce->timeline->fence_context, port - execlists->pending); return false; @@ -1662,7 +1677,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, sentinel = i915_request_has_sentinel(rq); if (sentinel && port != execlists->pending) { - GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n", + GEM_TRACE_ERR("%s: sentinel context:%llx not in prime position[%zd]\n", + engine->name, ce->timeline->fence_context, port - execlists->pending); return false; @@ -1677,7 +1693,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, if (i915_active_is_idle(&ce->active) && !intel_context_is_barrier(ce)) { - GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n", + GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n", + engine->name, ce->timeline->fence_context, port - execlists->pending); ok = false; @@ -1685,7 +1702,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, } if (!i915_vma_is_pinned(ce->state)) { - GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n", + GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n", + engine->name, ce->timeline->fence_context, port - execlists->pending); ok = false; @@ -1693,7 +1711,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, } if (!i915_vma_is_pinned(ce->ring->vma)) { - GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n", + GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n", + engine->name, ce->timeline->fence_context, port - execlists->pending); ok = false; From 2ea4a7ba9bf6e3bc1af899d1af481eb9b4ae8707 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 28 Apr 2020 20:00:52 -0700 Subject: [PATCH 110/121] drm/i915/gt: Avoid uninitialized use of rpcurupei in frequency_show When building with clang + -Wuninitialized: drivers/gpu/drm/i915/gt/debugfs_gt_pm.c:407:7: warning: variable 'rpcurupei' is uninitialized when used here [-Wuninitialized] rpcurupei, ^~~~~~~~~ drivers/gpu/drm/i915/gt/debugfs_gt_pm.c:304:16: note: initialize the variable 'rpcurupei' to silence this warning u32 rpcurupei, rpcurup, rpprevup; ^ = 0 1 warning generated. rpupei is assigned twice; based on the second argument to intel_uncore_read, it seems this one should have been assigned to rpcurupei. 
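A stripped-down standalone illustration of the bug shape and the fix (read_reg() is a stand-in for intel_uncore_read(); the values are meaningless):

#include <stdio.h>

static unsigned int read_reg(int which)
{
        return 0x10u * which;   /* stand-in for an uncore register read */
}

int main(void)
{
        unsigned int rpcurupei, rpcurup;

        /*
         * Before the fix, one read was assigned to an already-assigned
         * variable, so rpcurupei reached the printout uninitialized and
         * clang's -Wuninitialized fired. Giving each read its own
         * destination, as below, is the whole fix.
         */
        rpcurupei = read_reg(1);
        rpcurup = read_reg(2);

        printf("%u %u\n", rpcurupei, rpcurup);
        return 0;
}
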
Fixes: 9c878557b1eb ("drm/i915/gt: Use the RPM config register to determine clk frequencies") Link: https://github.com/ClangBuiltLinux/linux/issues/1016 Signed-off-by: Nathan Chancellor Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200429030051.920203-1-natechancellor@gmail.com --- drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c index 3d3ef62ed89f..f6ba66206273 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c @@ -336,7 +336,7 @@ static int frequency_show(struct seq_file *m, void *unused) rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1); - rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; + rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; From 24aac336ff78eb55aedda043ed982c61dc3ac49a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Apr 2020 10:02:55 +0100 Subject: [PATCH 111/121] drm/i915: Avoid dereferencing a dead context Once the intel_context is closed, the GEM context may be freed and so the link from intel_context.gem_context is invalid. <3>[ 219.782944] BUG: KASAN: use-after-free in intel_engine_coredump_alloc+0x1bc3/0x2250 [i915] <3>[ 219.782996] Read of size 8 at addr ffff8881d7dff0b8 by task kworker/0:1/12 <4>[ 219.783052] CPU: 0 PID: 12 Comm: kworker/0:1 Tainted: G U 5.7.0-rc2-g1f3ffd7683d54-kasan_118+ #1 <4>[ 219.783055] Hardware name: System manufacturer System Product Name/Z170 PRO GAMING, BIOS 3402 04/26/2017 <4>[ 219.783105] Workqueue: events heartbeat [i915] <4>[ 219.783109] Call Trace: <4>[ 219.783113] <4>[ 219.783119] dump_stack+0x96/0xdb <4>[ 219.783177] ? intel_engine_coredump_alloc+0x1bc3/0x2250 [i915] <4>[ 219.783182] print_address_description.constprop.6+0x16/0x310 <4>[ 219.783239] ? intel_engine_coredump_alloc+0x1bc3/0x2250 [i915] <4>[ 219.783295] ? intel_engine_coredump_alloc+0x1bc3/0x2250 [i915] <4>[ 219.783300] __kasan_report+0x137/0x190 <4>[ 219.783359] ? intel_engine_coredump_alloc+0x1bc3/0x2250 [i915] <4>[ 219.783366] kasan_report+0x32/0x50 <4>[ 219.783426] intel_engine_coredump_alloc+0x1bc3/0x2250 [i915] <4>[ 219.783481] execlists_reset+0x39c/0x13d0 [i915] <4>[ 219.783494] ? mark_held_locks+0x9e/0xe0 <4>[ 219.783546] ? execlists_hold+0xfc0/0xfc0 [i915] <4>[ 219.783551] ? lockdep_hardirqs_on+0x348/0x5f0 <4>[ 219.783557] ? _raw_spin_unlock_irqrestore+0x34/0x60 <4>[ 219.783606] ? execlists_submission_tasklet+0x118/0x3a0 [i915] <4>[ 219.783615] tasklet_action_common.isra.14+0x13b/0x410 <4>[ 219.783623] ? __do_softirq+0x1e4/0x9a7 <4>[ 219.783630] __do_softirq+0x226/0x9a7 <4>[ 219.783643] do_softirq_own_stack+0x2a/0x40 <4>[ 219.783647] <4>[ 219.783692] ? heartbeat+0x3e2/0x10f0 [i915] <4>[ 219.783696] do_softirq.part.13+0x49/0x50 <4>[ 219.783700] __local_bh_enable_ip+0x1a2/0x1e0 <4>[ 219.783748] heartbeat+0x409/0x10f0 [i915] <4>[ 219.783801] ? __live_idle_pulse+0x9f0/0x9f0 [i915] <4>[ 219.783806] ? lock_acquire+0x1ac/0x8a0 <4>[ 219.783811] ? process_one_work+0x811/0x1870 <4>[ 219.783827] ? rcu_read_lock_sched_held+0x9c/0xd0 <4>[ 219.783832] ? 
rcu_read_lock_bh_held+0xb0/0xb0 <4>[ 219.783836] ? _raw_spin_unlock_irq+0x1f/0x40 <4>[ 219.783845] process_one_work+0x8ca/0x1870 <4>[ 219.783848] ? lock_acquire+0x1ac/0x8a0 <4>[ 219.783852] ? worker_thread+0x1d0/0xb80 <4>[ 219.783864] ? pwq_dec_nr_in_flight+0x2c0/0x2c0 <4>[ 219.783870] ? do_raw_spin_lock+0x129/0x290 <4>[ 219.783886] worker_thread+0x82/0xb80 <4>[ 219.783895] ? __kthread_parkme+0xaf/0x1b0 <4>[ 219.783902] ? process_one_work+0x1870/0x1870 <4>[ 219.783906] kthread+0x34e/0x420 <4>[ 219.783911] ? kthread_create_on_node+0xc0/0xc0 <4>[ 219.783918] ret_from_fork+0x3a/0x50 <3>[ 219.783950] Allocated by task 1264: <4>[ 219.783975] save_stack+0x19/0x40 <4>[ 219.783978] __kasan_kmalloc.constprop.3+0xa0/0xd0 <4>[ 219.784029] i915_gem_create_context+0xa2/0xab8 [i915] <4>[ 219.784081] i915_gem_context_create_ioctl+0x1fa/0x450 [i915] <4>[ 219.784085] drm_ioctl_kernel+0x1d8/0x270 <4>[ 219.784088] drm_ioctl+0x676/0x930 <4>[ 219.784092] ksys_ioctl+0xb7/0xe0 <4>[ 219.784096] __x64_sys_ioctl+0x6a/0xb0 <4>[ 219.784100] do_syscall_64+0x94/0x530 <4>[ 219.784103] entry_SYSCALL_64_after_hwframe+0x49/0xb3 <3>[ 219.784120] Freed by task 12: <4>[ 219.784141] save_stack+0x19/0x40 <4>[ 219.784145] __kasan_slab_free+0x130/0x180 <4>[ 219.784148] kmem_cache_free_bulk+0x1bd/0x500 <4>[ 219.784152] kfree_rcu_work+0x1d8/0x890 <4>[ 219.784155] process_one_work+0x8ca/0x1870 <4>[ 219.784158] worker_thread+0x82/0xb80 <4>[ 219.784162] kthread+0x34e/0x420 <4>[ 219.784165] ret_from_fork+0x3a/0x50 Fixes: 2e46a2a0b014 ("drm/i915: Use explicit flag to mark unreachable intel_context") Signed-off-by: Chris Wilson Acked-by: Akeem G Abodunrin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200428090255.10035-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gpu_error.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 4d54dba35302..a976cd67b3b3 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1207,8 +1207,6 @@ static void engine_record_registers(struct intel_engine_coredump *ee) static void record_request(const struct i915_request *request, struct i915_request_coredump *erq) { - const struct i915_gem_context *ctx; - erq->flags = request->fence.flags; erq->context = request->fence.context; erq->seqno = request->fence.seqno; @@ -1218,9 +1216,13 @@ static void record_request(const struct i915_request *request, erq->pid = 0; rcu_read_lock(); - ctx = rcu_dereference(request->context->gem_context); - if (ctx) - erq->pid = pid_nr(ctx->pid); + if (!intel_context_is_closed(request->context)) { + const struct i915_gem_context *ctx; + + ctx = rcu_dereference(request->context->gem_context); + if (ctx) + erq->pid = pid_nr(ctx->pid); + } rcu_read_unlock(); } From 8c35a195761107f314fce8e558699322027c3017 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 29 Apr 2020 16:24:25 +0300 Subject: [PATCH 112/121] drm/i915/selftests: fix error handling in __live_lrc_indirect_ctx_bb() If intel_context_create() fails then it leads to an error pointer dereference. I shuffled things around to make error handling easier. 
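The reshuffle follows the usual acquire-in-order / unwind-in-reverse pattern; a self-contained sketch with hypothetical res_create()/res_destroy() helpers (these are not i915 APIs):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct res { int id; };

static struct res *res_create(int id)
{
        struct res *r = malloc(sizeof(*r));

        if (r)
                r->id = id;
        return r;
}

static void res_destroy(struct res *r)
{
        free(r);
}

static int use_pair(void)
{
        struct res *a, *b;
        int err = 0;

        a = res_create(1);
        if (!a)
                return -ENOMEM;         /* nothing acquired yet: plain return */

        b = res_create(2);
        if (!b) {
                err = -ENOMEM;
                goto put_a;             /* unwind exactly what was acquired */
        }

        printf("using %d and %d\n", a->id, b->id);

        res_destroy(b);                 /* success path releases in reverse order */
put_a:
        res_destroy(a);
        return err;
}

int main(void)
{
        return use_pair() ? 1 : 0;
}

Each failure point jumps to the label that releases only what has already been taken, which removes the error-pointer dereference the original ordering allowed.
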
Fixes: 1dd47b54baea ("drm/i915: Add live selftests for indirect ctx batchbuffers") Signed-off-by: Dan Carpenter Reviewed-by: Chris Wilson Reviewed-by: Andi Shyti Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200429132425.GE815283@mwanda --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 32 +++++++++++++++----------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index d3fa91aed7de..c4bfad5c49de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -5795,26 +5795,29 @@ static int indirect_ctx_bb_check(struct intel_context *ce) static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine) { struct intel_context *a, *b; - int err = 0; + int err; a = intel_context_create(engine); - b = intel_context_create(engine); - + if (IS_ERR(a)) + return PTR_ERR(a); err = intel_context_pin(a); if (err) - return err; + goto put_a; - err = intel_context_pin(b); - if (err) { - intel_context_put(a); - return err; + b = intel_context_create(engine); + if (IS_ERR(b)) { + err = PTR_ERR(b); + goto unpin_a; } + err = intel_context_pin(b); + if (err) + goto put_b; /* We use the already reserved extra page in context state */ if (!a->wa_bb_page) { GEM_BUG_ON(b->wa_bb_page); GEM_BUG_ON(INTEL_GEN(engine->i915) == 12); - goto out; + goto unpin_b; } /* @@ -5829,14 +5832,17 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine) err = indirect_ctx_bb_check(a); if (err) - goto out; + goto unpin_b; err = indirect_ctx_bb_check(b); -out: - intel_context_unpin(b); - intel_context_put(b); +unpin_b: + intel_context_unpin(b); +put_b: + intel_context_put(b); +unpin_a: intel_context_unpin(a); +put_a: intel_context_put(a); return err; From be1cb55a07bfc528d826962f1e421a44ed5f8311 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 18:24:29 +0100 Subject: [PATCH 113/121] drm/i915/gt: Keep a no-frills swappable copy of the default context state We need to keep the default context state around to instantiate new contexts (aka golden rendercontext), and we also keep it pinned while the engine is active so that we can quickly reset a hanging context. However, the default contexts are large enough to merit keeping in swappable memory as opposed to kernel memory, so we store them inside shmemfs. Currently, we use the normal GEM objects to create the default context image, but we can throw away all but the shmemfs file. This greatly simplifies the tricky power management code which wants to run underneath the normal GT locking, and we definitely do not want to use any high level objects that may appear to recurse back into the GT. Though perhaps the primary advantage of the complex GEM object is that we aggressively cache the mapping, but here we are recreating the vm_area everytime time we unpark. At the worst, we add a lightweight cache, but first find a microbenchmark that is impacted. Having started to create some utility functions to make working with shmemfs objects easier, we can start putting them to wider use, where GEM objects are overkill, such as storing persistent error state. 
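The new helpers only exist inside the tree, so as a rough userspace analogue (memfd_create() and the "default-state" name are illustrative, not driver code), the underlying idea of parking a blob in swappable shmem and reading it back on demand looks like this:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        static const char payload[] = "golden context image";
        char readback[sizeof(payload)] = "";
        int fd = memfd_create("default-state", 0);

        if (fd < 0)
                return 1;

        /* Store the blob once; its pages remain ordinary, swappable shmem. */
        if (pwrite(fd, payload, sizeof(payload), 0) != (ssize_t)sizeof(payload))
                return 1;

        /* Later users page it back in only when they actually need it. */
        if (pread(fd, readback, sizeof(readback), 0) != (ssize_t)sizeof(readback))
                return 1;

        printf("%s\n", readback);
        close(fd);
        return 0;
}

Inside the driver the same property is what lets the golden render context be dropped from kernel memory and paged back in only when a reset or a new context needs it.
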
Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Ramalingam C Cc: Tvrtko Ursulin Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200429172429.6054-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 10 +- drivers/gpu/drm/i915/gt/intel_engine_types.h | 2 +- drivers/gpu/drm/i915/gt/intel_gt.c | 60 +----- drivers/gpu/drm/i915/gt/intel_lrc.c | 25 +-- .../gpu/drm/i915/gt/intel_ring_submission.c | 16 +- drivers/gpu/drm/i915/gt/selftest_context.c | 2 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 10 +- drivers/gpu/drm/i915/gt/shmem_utils.c | 173 ++++++++++++++++++ drivers/gpu/drm/i915/gt/shmem_utils.h | 23 +++ drivers/gpu/drm/i915/gt/st_shmem_utils.c | 63 +++++++ drivers/gpu/drm/i915/i915_gpu_error.c | 26 --- .../drm/i915/selftests/i915_mock_selftests.h | 1 + 14 files changed, 291 insertions(+), 123 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/shmem_utils.c create mode 100644 drivers/gpu/drm/i915/gt/shmem_utils.h create mode 100644 drivers/gpu/drm/i915/gt/st_shmem_utils.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 21bb2fb5a6b8..caf00d92ea9d 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -111,6 +111,7 @@ gt-y += \ gt/intel_sseu.o \ gt/intel_timeline.o \ gt/intel_workarounds.o \ + gt/shmem_utils.o \ gt/sysfs_engines.o # autogenerated null render state gt-y += \ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7c3cb5aedfdf..80bf71636c0f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -834,7 +834,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) intel_engine_cleanup_cmd_parser(engine); if (engine->default_state) - i915_gem_object_put(engine->default_state); + fput(engine->default_state); if (engine->kernel_context) { intel_context_unpin(engine->kernel_context); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 3be679741d22..446e35ac0224 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -15,6 +15,7 @@ #include "intel_gt_pm.h" #include "intel_rc6.h" #include "intel_ring.h" +#include "shmem_utils.h" static int __engine_unpark(struct intel_wakeref *wf) { @@ -30,10 +31,8 @@ static int __engine_unpark(struct intel_wakeref *wf) /* Pin the default state for fast resets from atomic context. 
*/ map = NULL; if (engine->default_state) - map = i915_gem_object_pin_map(engine->default_state, - I915_MAP_WB); - if (!IS_ERR_OR_NULL(map)) - engine->pinned_default_state = map; + map = shmem_pin_map(engine->default_state); + engine->pinned_default_state = map; /* Discard stale context state from across idling */ ce = engine->kernel_context; @@ -264,7 +263,8 @@ static int __engine_park(struct intel_wakeref *wf) engine->park(engine); if (engine->pinned_default_state) { - i915_gem_object_unpin_map(engine->default_state); + shmem_unpin_map(engine->default_state, + engine->pinned_default_state); engine->pinned_default_state = NULL; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index cfe4feaee982..483d8ff39a0d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -339,7 +339,7 @@ struct intel_engine_cs { unsigned long wakeref_serial; struct intel_wakeref wakeref; - struct drm_i915_gem_object *default_state; + struct file *default_state; void *pinned_default_state; struct { diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index d9cf8194c997..52593edf8aa0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -16,6 +16,7 @@ #include "intel_rps.h" #include "intel_uncore.h" #include "intel_pm.h" +#include "shmem_utils.h" void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { @@ -371,18 +372,6 @@ static struct i915_address_space *kernel_vm(struct intel_gt *gt) return i915_vm_get(>->ggtt->vm); } -static int __intel_context_flush_retire(struct intel_context *ce) -{ - struct intel_timeline *tl; - - tl = intel_context_timeline_lock(ce); - if (IS_ERR(tl)) - return PTR_ERR(tl); - - intel_context_timeline_unlock(tl); - return 0; -} - static int __engines_record_defaults(struct intel_gt *gt) { struct i915_request *requests[I915_NUM_ENGINES] = {}; @@ -448,8 +437,7 @@ err_rq: for (id = 0; id < ARRAY_SIZE(requests); id++) { struct i915_request *rq; - struct i915_vma *state; - void *vaddr; + struct file *state; rq = requests[id]; if (!rq) @@ -461,48 +449,16 @@ err_rq: } GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags)); - state = rq->context->state; - if (!state) + if (!rq->context->state) continue; - /* Serialise with retirement on another CPU */ - GEM_BUG_ON(!i915_request_completed(rq)); - err = __intel_context_flush_retire(rq->context); - if (err) - goto out; - - /* We want to be able to unbind the state from the GGTT */ - GEM_BUG_ON(intel_context_is_pinned(rq->context)); - - /* - * As we will hold a reference to the logical state, it will - * not be torn down with the context, and importantly the - * object will hold onto its vma (making it possible for a - * stray GTT write to corrupt our defaults). Unmap the vma - * from the GTT to prevent such accidents and reclaim the - * space. 
- */ - err = i915_vma_unbind(state); - if (err) - goto out; - - i915_gem_object_lock(state->obj); - err = i915_gem_object_set_to_cpu_domain(state->obj, false); - i915_gem_object_unlock(state->obj); - if (err) - goto out; - - i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC); - - /* Check we can acquire the image of the context state */ - vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); + /* Keep a copy of the state's backing pages; free the obj */ + state = shmem_create_from_object(rq->context->state->obj); + if (IS_ERR(state)) { + err = PTR_ERR(state); goto out; } - - rq->engine->default_state = i915_gem_object_get(state->obj); - i915_gem_object_unpin_map(state->obj); + rq->engine->default_state = state; } out: diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 90acc3d2440b..7fc4081c34fe 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -147,6 +147,7 @@ #include "intel_reset.h" #include "intel_ring.h" #include "intel_workarounds.h" +#include "shmem_utils.h" #define RING_EXECLIST_QFULL (1 << 0x2) #define RING_EXECLIST1_VALID (1 << 0x3) @@ -5083,30 +5084,18 @@ populate_lr_context(struct intel_context *ce, { bool inhibit = true; void *vaddr; - int ret; vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - drm_dbg(&engine->i915->drm, - "Could not map object pages! (%d)\n", ret); - return ret; + drm_dbg(&engine->i915->drm, "Could not map object pages!\n"); + return PTR_ERR(vaddr); } set_redzone(vaddr, engine); if (engine->default_state) { - void *defaults; - - defaults = i915_gem_object_pin_map(engine->default_state, - I915_MAP_WB); - if (IS_ERR(defaults)) { - ret = PTR_ERR(defaults); - goto err_unpin_ctx; - } - - memcpy(vaddr, defaults, engine->context_size); - i915_gem_object_unpin_map(engine->default_state); + shmem_read(engine->default_state, 0, + vaddr, engine->context_size); __set_bit(CONTEXT_VALID_BIT, &ce->flags); inhibit = false; } @@ -5121,11 +5110,9 @@ populate_lr_context(struct intel_context *ce, execlists_init_reg_state(vaddr + LRC_STATE_OFFSET, ce, engine, ring, inhibit); - ret = 0; -err_unpin_ctx: __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size); i915_gem_object_unpin_map(ctx_obj); - return ret; + return 0; } static int __execlists_context_alloc(struct intel_context *ce, diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index d015f7b8b28e..ca7286e58409 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -42,6 +42,7 @@ #include "intel_reset.h" #include "intel_ring.h" #include "intel_workarounds.h" +#include "shmem_utils.h" /* Rough estimate of the typical request size, performing a flush, * set-context and then emitting the batch. 
@@ -1241,7 +1242,7 @@ alloc_context_vma(struct intel_engine_cs *engine) i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC); if (engine->default_state) { - void *defaults, *vaddr; + void *vaddr; vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(vaddr)) { @@ -1249,15 +1250,8 @@ alloc_context_vma(struct intel_engine_cs *engine) goto err_obj; } - defaults = i915_gem_object_pin_map(engine->default_state, - I915_MAP_WB); - if (IS_ERR(defaults)) { - err = PTR_ERR(defaults); - goto err_map; - } - - memcpy(vaddr, defaults, engine->context_size); - i915_gem_object_unpin_map(engine->default_state); + shmem_read(engine->default_state, 0, + vaddr, engine->context_size); i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); @@ -1271,8 +1265,6 @@ alloc_context_vma(struct intel_engine_cs *engine) return vma; -err_map: - i915_gem_object_unpin_map(obj); err_obj: i915_gem_object_put(obj); return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index e874dfaa5316..b8ed3cbe1277 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -155,7 +155,7 @@ static int live_context_size(void *arg) for_each_engine(engine, gt, id) { struct { - struct drm_i915_gem_object *state; + struct file *state; void *pinned; } saved; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index c4bfad5c49de..7529df92f6a2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -4452,8 +4452,7 @@ static int live_lrc_layout(void *arg) if (!engine->default_state) continue; - hw = i915_gem_object_pin_map(engine->default_state, - I915_MAP_WB); + hw = shmem_pin_map(engine->default_state); if (IS_ERR(hw)) { err = PTR_ERR(hw); break; @@ -4525,7 +4524,7 @@ static int live_lrc_layout(void *arg) hexdump(lrc, PAGE_SIZE); } - i915_gem_object_unpin_map(engine->default_state); + shmem_unpin_map(engine->default_state, hw); if (err) break; } @@ -4630,8 +4629,7 @@ static int live_lrc_fixed(void *arg) if (!engine->default_state) continue; - hw = i915_gem_object_pin_map(engine->default_state, - I915_MAP_WB); + hw = shmem_pin_map(engine->default_state); if (IS_ERR(hw)) { err = PTR_ERR(hw); break; @@ -4652,7 +4650,7 @@ static int live_lrc_fixed(void *arg) } } - i915_gem_object_unpin_map(engine->default_state); + shmem_unpin_map(engine->default_state, hw); } return err; diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c new file mode 100644 index 000000000000..43c7acbdc79d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include +#include +#include + +#include "gem/i915_gem_object.h" +#include "shmem_utils.h" + +struct file *shmem_create_from_data(const char *name, void *data, size_t len) +{ + struct file *file; + int err; + + file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE); + if (IS_ERR(file)) + return file; + + err = shmem_write(file, 0, data, len); + if (err) { + fput(file); + return ERR_PTR(err); + } + + return file; +} + +struct file *shmem_create_from_object(struct drm_i915_gem_object *obj) +{ + struct file *file; + void *ptr; + + if (obj->ops == &i915_gem_shmem_ops) { + file = obj->base.filp; + atomic_long_inc(&file->f_count); + return file; + } + + ptr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(ptr)) + return ERR_CAST(ptr); + + file = 
shmem_create_from_data("", ptr, obj->base.size); + i915_gem_object_unpin_map(obj); + + return file; +} + +static size_t shmem_npte(struct file *file) +{ + return file->f_mapping->host->i_size >> PAGE_SHIFT; +} + +static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte) +{ + unsigned long pfn; + + vunmap(ptr); + + for (pfn = 0; pfn < n_pte; pfn++) { + struct page *page; + + page = shmem_read_mapping_page_gfp(file->f_mapping, pfn, + GFP_KERNEL); + if (!WARN_ON(IS_ERR(page))) { + put_page(page); + put_page(page); + } + } +} + +void *shmem_pin_map(struct file *file) +{ + const size_t n_pte = shmem_npte(file); + pte_t *stack[32], **ptes, **mem; + struct vm_struct *area; + unsigned long pfn; + + mem = stack; + if (n_pte > ARRAY_SIZE(stack)) { + mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL); + if (!mem) + return NULL; + } + + area = alloc_vm_area(n_pte << PAGE_SHIFT, mem); + if (!area) { + if (mem != stack) + kvfree(mem); + return NULL; + } + + ptes = mem; + for (pfn = 0; pfn < n_pte; pfn++) { + struct page *page; + + page = shmem_read_mapping_page_gfp(file->f_mapping, pfn, + GFP_KERNEL); + if (IS_ERR(page)) + goto err_page; + + **ptes++ = mk_pte(page, PAGE_KERNEL); + } + + if (mem != stack) + kvfree(mem); + + mapping_set_unevictable(file->f_mapping); + return area->addr; + +err_page: + if (mem != stack) + kvfree(mem); + + __shmem_unpin_map(file, area->addr, pfn); + return NULL; +} + +void shmem_unpin_map(struct file *file, void *ptr) +{ + mapping_clear_unevictable(file->f_mapping); + __shmem_unpin_map(file, ptr, shmem_npte(file)); +} + +static int __shmem_rw(struct file *file, loff_t off, + void *ptr, size_t len, + bool write) +{ + unsigned long pfn; + + for (pfn = off >> PAGE_SHIFT; len; pfn++) { + unsigned int this = + min_t(size_t, PAGE_SIZE - offset_in_page(off), len); + struct page *page; + void *vaddr; + + page = shmem_read_mapping_page_gfp(file->f_mapping, pfn, + GFP_KERNEL); + if (IS_ERR(page)) + return PTR_ERR(page); + + vaddr = kmap(page); + if (write) + memcpy(vaddr + offset_in_page(off), ptr, this); + else + memcpy(ptr, vaddr + offset_in_page(off), this); + kunmap(page); + put_page(page); + + len -= this; + ptr += this; + off = 0; + } + + return 0; +} + +int shmem_read(struct file *file, loff_t off, void *dst, size_t len) +{ + return __shmem_rw(file, off, dst, len, false); +} + +int shmem_write(struct file *file, loff_t off, void *src, size_t len) +{ + return __shmem_rw(file, off, src, len, true); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "st_shmem_utils.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.h b/drivers/gpu/drm/i915/gt/shmem_utils.h new file mode 100644 index 000000000000..c1669170c351 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/shmem_utils.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef SHMEM_UTILS_H +#define SHMEM_UTILS_H + +#include + +struct drm_i915_gem_object; +struct file; + +struct file *shmem_create_from_data(const char *name, void *data, size_t len); +struct file *shmem_create_from_object(struct drm_i915_gem_object *obj); + +void *shmem_pin_map(struct file *file); +void shmem_unpin_map(struct file *file, void *ptr); + +int shmem_read(struct file *file, loff_t off, void *dst, size_t len); +int shmem_write(struct file *file, loff_t off, void *src, size_t len); + +#endif /* SHMEM_UTILS_H */ diff --git a/drivers/gpu/drm/i915/gt/st_shmem_utils.c b/drivers/gpu/drm/i915/gt/st_shmem_utils.c new file mode 100644 index 000000000000..b279fe88b70e --- 
/dev/null +++ b/drivers/gpu/drm/i915/gt/st_shmem_utils.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +/* Just a quick and causal check of the shmem_utils API */ + +static int igt_shmem_basic(void *ignored) +{ + u32 datum = 0xdeadbeef, result; + struct file *file; + u32 *map; + int err; + + file = shmem_create_from_data("mock", &datum, sizeof(datum)); + if (IS_ERR(file)) + return PTR_ERR(file); + + result = 0; + err = shmem_read(file, 0, &result, sizeof(result)); + if (err) + goto out_file; + + if (result != datum) { + pr_err("Incorrect read back from shmemfs: %x != %x\n", + result, datum); + err = -EINVAL; + goto out_file; + } + + result = 0xc0ffee; + err = shmem_write(file, 0, &result, sizeof(result)); + if (err) + goto out_file; + + map = shmem_pin_map(file); + if (!map) { + err = -ENOMEM; + goto out_file; + } + + if (*map != result) { + pr_err("Incorrect read back via mmap of last write: %x != %x\n", + *map, result); + err = -EINVAL; + goto out_map; + } + +out_map: + shmem_unpin_map(file, map); +out_file: + fput(file); + return err; +} + +int shmem_utils_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_shmem_basic), + }; + + return i915_subtests(tests, NULL); +} diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a976cd67b3b3..eec292d06f11 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1320,26 +1320,6 @@ capture_user(struct intel_engine_capture_vma *capture, return capture; } -static struct i915_vma_coredump * -capture_object(const struct intel_gt *gt, - struct drm_i915_gem_object *obj, - const char *name, - struct i915_vma_compress *compress) -{ - if (obj && i915_gem_object_has_pages(obj)) { - struct i915_vma fake = { - .node = { .start = U64_MAX, .size = obj->base.size }, - .size = obj->base.size, - .pages = obj->mm.pages, - .obj = obj, - }; - - return i915_vma_coredump_create(gt, &fake, name, compress); - } else { - return NULL; - } -} - static void add_vma(struct intel_engine_coredump *ee, struct i915_vma_coredump *vma) { @@ -1428,12 +1408,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, engine->wa_ctx.vma, "WA context", compress)); - - add_vma(ee, - capture_object(engine->gt, - engine->default_state, - "NULL context", - compress)); } static struct intel_engine_coredump * diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 5b39bab4da1d..6a2be7d0dd95 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -16,6 +16,7 @@ * Tests are executed in order by igt/drv_selftest */ selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */ +selftest(shmem, shmem_utils_mock_selftests) selftest(fence, i915_sw_fence_mock_selftests) selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) From 426d0073fb6d1a9513978cea4c9e8396f3721fba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 21:54:41 +0100 Subject: [PATCH 114/121] drm/i915/gt: Always enable busy-stats for execlists In the near future, we will utilize the busy-stats on each engine to approximate the C0 cycles of each, and use that as an input to a manual RPS mechanism. That entails having busy-stats always enabled and so we can remove the enable/disable routines and simplify the pmu setup. 
As a consequence of always having the stats enabled, we can also show the current active time via sysfs/engine/xcs/active_time_ns. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200429205446.3259-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine.h | 3 - drivers/gpu/drm/i915/gt/intel_engine_cs.c | 76 +------------------ drivers/gpu/drm/i915/gt/intel_engine_types.h | 31 ++++---- drivers/gpu/drm/i915/gt/intel_lrc.c | 44 +++-------- drivers/gpu/drm/i915/i915_pmu.c | 32 +------- drivers/gpu/drm/i915/selftests/i915_request.c | 16 +--- 6 files changed, 30 insertions(+), 172 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index d9ee64e2ef79..d10e52ff059f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -310,9 +310,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...); -int intel_enable_engine_stats(struct intel_engine_cs *engine); -void intel_disable_engine_stats(struct intel_engine_cs *engine); - ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); struct i915_request * diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 80bf71636c0f..c9e46c5ced43 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1589,58 +1589,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_engine_print_breadcrumbs(engine, m); } -/** - * intel_enable_engine_stats() - Enable engine busy tracking on engine - * @engine: engine to enable stats collection - * - * Start collecting the engine busyness data for @engine. - * - * Returns 0 on success or a negative error code. - */ -int intel_enable_engine_stats(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists *execlists = &engine->execlists; - unsigned long flags; - int err = 0; - - if (!intel_engine_supports_stats(engine)) - return -ENODEV; - - execlists_active_lock_bh(execlists); - write_seqlock_irqsave(&engine->stats.lock, flags); - - if (unlikely(engine->stats.enabled == ~0)) { - err = -EBUSY; - goto unlock; - } - - if (engine->stats.enabled++ == 0) { - struct i915_request * const *port; - struct i915_request *rq; - - engine->stats.enabled_at = ktime_get(); - - /* XXX submission method oblivious? */ - for (port = execlists->active; (rq = *port); port++) - engine->stats.active++; - - for (port = execlists->pending; (rq = *port); port++) { - /* Exclude any contexts already counted in active */ - if (!intel_context_inflight_count(rq->context)) - engine->stats.active++; - } - - if (engine->stats.active) - engine->stats.start = engine->stats.enabled_at; - } - -unlock: - write_sequnlock_irqrestore(&engine->stats.lock, flags); - execlists_active_unlock_bh(execlists); - - return err; -} - static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) { ktime_t total = engine->stats.total; @@ -1649,7 +1597,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) * If the engine is executing something at the moment * add it to the total. 
*/ - if (engine->stats.active) + if (atomic_read(&engine->stats.active)) total = ktime_add(total, ktime_sub(ktime_get(), engine->stats.start)); @@ -1675,28 +1623,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) return total; } -/** - * intel_disable_engine_stats() - Disable engine busy tracking on engine - * @engine: engine to disable stats collection - * - * Stops collecting the engine busyness data for @engine. - */ -void intel_disable_engine_stats(struct intel_engine_cs *engine) -{ - unsigned long flags; - - if (!intel_engine_supports_stats(engine)) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - WARN_ON_ONCE(engine->stats.enabled == 0); - if (--engine->stats.enabled == 0) { - engine->stats.total = __intel_engine_get_busy_time(engine); - engine->stats.active = 0; - } - write_sequnlock_irqrestore(&engine->stats.lock, flags); -} - static bool match_ring(struct i915_request *rq) { u32 ring = ENGINE_READ(rq->engine, RING_START); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 483d8ff39a0d..83b1f95ebf12 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -531,28 +531,16 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); struct { + /** + * @active: Number of contexts currently scheduled in. + */ + atomic_t active; + /** * @lock: Lock protecting the below fields. */ seqlock_t lock; - /** - * @enabled: Reference count indicating number of listeners. - */ - unsigned int enabled; - /** - * @active: Number of contexts currently scheduled in. - */ - unsigned int active; - /** - * @enabled_at: Timestamp when busy stats were enabled. - */ - ktime_t enabled_at; - /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. - */ - ktime_t start; + /** * @total: Total time this engine was busy. * @@ -560,6 +548,13 @@ struct intel_engine_cs { * where engine is currently busy (active > 0). */ ktime_t total; + + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. 
+ */ + ktime_t start; } stats; struct { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7fc4081c34fe..4311b12542fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1199,17 +1199,14 @@ static void intel_engine_context_in(struct intel_engine_cs *engine) { unsigned long flags; - if (READ_ONCE(engine->stats.enabled) == 0) + if (atomic_add_unless(&engine->stats.active, 1, 0)) return; write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - if (engine->stats.active++ == 0) - engine->stats.start = ktime_get(); - GEM_BUG_ON(engine->stats.active == 0); + if (!atomic_add_unless(&engine->stats.active, 1, 0)) { + engine->stats.start = ktime_get(); + atomic_inc(&engine->stats.active); } - write_sequnlock_irqrestore(&engine->stats.lock, flags); } @@ -1217,36 +1214,17 @@ static void intel_engine_context_out(struct intel_engine_cs *engine) { unsigned long flags; - if (READ_ONCE(engine->stats.enabled) == 0) + GEM_BUG_ON(!atomic_read(&engine->stats.active)); + + if (atomic_add_unless(&engine->stats.active, -1, 1)) return; write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - ktime_t last; - - if (engine->stats.active && --engine->stats.active == 0) { - /* - * Decrement the active context count and in case GPU - * is now idle add up to the running total. - */ - last = ktime_sub(ktime_get(), engine->stats.start); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } else if (engine->stats.active == 0) { - /* - * After turning on engine stats, context out might be - * the first event in which case we account from the - * time stats gathering was turned on. - */ - last = ktime_sub(ktime_get(), engine->stats.enabled_at); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } + if (atomic_dec_and_test(&engine->stats.active)) { + engine->stats.total = + ktime_add(engine->stats.total, + ktime_sub(ktime_get(), engine->stats.start)); } - write_sequnlock_irqrestore(&engine->stats.lock, flags); } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 230e9256ab30..83c6a8ccd2cb 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -439,29 +439,9 @@ static u64 count_interrupts(struct drm_i915_private *i915) return sum; } -static void engine_event_destroy(struct perf_event *event) -{ - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct intel_engine_cs *engine; - - engine = intel_engine_lookup_user(i915, - engine_event_class(event), - engine_event_instance(event)); - if (drm_WARN_ON_ONCE(&i915->drm, !engine)) - return; - - if (engine_event_sample(event) == I915_SAMPLE_BUSY && - intel_engine_supports_stats(engine)) - intel_disable_engine_stats(engine); -} - static void i915_pmu_event_destroy(struct perf_event *event) { WARN_ON(event->parent); - - if (is_engine_event(event)) - engine_event_destroy(event); } static int @@ -514,23 +494,13 @@ static int engine_event_init(struct perf_event *event) struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); struct intel_engine_cs *engine; - u8 sample; - int ret; engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); if (!engine) return -ENODEV; - sample = engine_event_sample(event); - ret = engine_event_status(engine, sample); - if (ret) - return ret; - - if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) 
- ret = intel_enable_engine_stats(engine); - - return ret; + return engine_event_status(engine, engine_event_sample(event)); } static int i915_pmu_event_init(struct perf_event *event) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 3b319c0953cb..15b1ca9f7a01 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1679,8 +1679,7 @@ static int perf_series_engines(void *arg) p->engine = ps->ce[idx]->engine; intel_engine_pm_get(p->engine); - if (intel_engine_supports_stats(p->engine) && - !intel_enable_engine_stats(p->engine)) + if (intel_engine_supports_stats(p->engine)) p->busy = intel_engine_get_busy_time(p->engine) + 1; p->runtime = -intel_context_get_total_runtime_ns(ce); p->time = ktime_get(); @@ -1700,7 +1699,6 @@ static int perf_series_engines(void *arg) if (p->busy) { p->busy = ktime_sub(intel_engine_get_busy_time(p->engine), p->busy - 1); - intel_disable_engine_stats(p->engine); } err = switch_to_kernel_sync(ce, err); @@ -1762,8 +1760,7 @@ static int p_sync0(void *arg) } busy = false; - if (intel_engine_supports_stats(engine) && - !intel_enable_engine_stats(engine)) { + if (intel_engine_supports_stats(engine)) { p->busy = intel_engine_get_busy_time(engine); busy = true; } @@ -1796,7 +1793,6 @@ static int p_sync0(void *arg) if (busy) { p->busy = ktime_sub(intel_engine_get_busy_time(engine), p->busy); - intel_disable_engine_stats(engine); } err = switch_to_kernel_sync(ce, err); @@ -1830,8 +1826,7 @@ static int p_sync1(void *arg) } busy = false; - if (intel_engine_supports_stats(engine) && - !intel_enable_engine_stats(engine)) { + if (intel_engine_supports_stats(engine)) { p->busy = intel_engine_get_busy_time(engine); busy = true; } @@ -1866,7 +1861,6 @@ static int p_sync1(void *arg) if (busy) { p->busy = ktime_sub(intel_engine_get_busy_time(engine), p->busy); - intel_disable_engine_stats(engine); } err = switch_to_kernel_sync(ce, err); @@ -1899,8 +1893,7 @@ static int p_many(void *arg) } busy = false; - if (intel_engine_supports_stats(engine) && - !intel_enable_engine_stats(engine)) { + if (intel_engine_supports_stats(engine)) { p->busy = intel_engine_get_busy_time(engine); busy = true; } @@ -1924,7 +1917,6 @@ static int p_many(void *arg) if (busy) { p->busy = ktime_sub(intel_engine_get_busy_time(engine), p->busy); - intel_disable_engine_stats(engine); } err = switch_to_kernel_sync(ce, err); From 9bad2adbddfee476185f1d77a8bf496f841d2088 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 21:54:42 +0100 Subject: [PATCH 115/121] drm/i915/gt: Move rps.enabled/active to flags Pull the boolean intel_rps.enabled and intel_rps.active into a single flags field, in preparation for more. 
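With both booleans folded into a single rps->flags word, state queries and transitions become plain atomic bitops; a minimal sketch of the pattern wrapped by the new intel_rps_*() helpers added to intel_rps.h below (illustrative fragment only):

	set_bit(INTEL_RPS_ENABLED, &rps->flags);	/* intel_rps_set_enabled() */
	if (test_bit(INTEL_RPS_ENABLED, &rps->flags))	/* intel_rps_is_enabled() */
		set_bit(INTEL_RPS_ACTIVE, &rps->flags);	/* intel_rps_set_active() */

	/*
	 * test_and_clear_bit() returns the old value, so only one caller
	 * observes the 1 -> 0 transition; intel_rps_clear_active() thereby
	 * makes a repeated park a harmless no-op.
	 */
	was_active = test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags);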
Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200429205446.3259-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 5 +-- drivers/gpu/drm/i915/gt/intel_rps.c | 43 ++++++++++++++--------- drivers/gpu/drm/i915/gt/intel_rps.h | 30 ++++++++++++++++ drivers/gpu/drm/i915/gt/intel_rps_types.h | 8 +++-- drivers/gpu/drm/i915/gt/selftest_rps.c | 22 ++++++------ drivers/gpu/drm/i915/i915_debugfs.c | 5 +-- 6 files changed, 79 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c index f6ba66206273..174a24553322 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c @@ -556,7 +556,8 @@ static int rps_boost_show(struct seq_file *m, void *data) struct drm_i915_private *i915 = gt->i915; struct intel_rps *rps = >->rps; - seq_printf(m, "RPS enabled? %d\n", rps->enabled); + seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps))); + seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps))); seq_printf(m, "GPU busy? %s\n", yesno(gt->awake)); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); @@ -576,7 +577,7 @@ static int rps_boost_show(struct seq_file *m, void *data) seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); - if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) { + if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) { struct intel_uncore *uncore = gt->uncore; u32 rpup, rpupei; u32 rpdown, rpdownei; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 2ce006e58b4a..a27e989a08eb 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -648,7 +648,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) mutex_lock(&rps->power.mutex); if (interactive) { - if (!rps->power.interactive++ && READ_ONCE(rps->active)) + if (!rps->power.interactive++ && intel_rps_is_active(rps)) rps_set_power(rps, HIGH_POWER); } else { GEM_BUG_ON(!rps->power.interactive); @@ -721,7 +721,7 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update) void intel_rps_unpark(struct intel_rps *rps) { - if (!rps->enabled) + if (!intel_rps_is_enabled(rps)) return; GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq); @@ -732,8 +732,7 @@ void intel_rps_unpark(struct intel_rps *rps) */ mutex_lock(&rps->lock); - WRITE_ONCE(rps->active, true); - + intel_rps_set_active(rps); intel_rps_set(rps, clamp(rps->cur_freq, rps->min_freq_softlimit, @@ -754,13 +753,12 @@ void intel_rps_park(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - if (!rps->enabled) + if (!intel_rps_clear_active(rps)) return; if (INTEL_GEN(i915) >= 6) rps_disable_interrupts(rps); - WRITE_ONCE(rps->active, false); if (rps->last_freq <= rps->idle_freq) return; @@ -802,7 +800,7 @@ void intel_rps_boost(struct i915_request *rq) struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; unsigned long flags; - if (i915_request_signaled(rq) || !READ_ONCE(rps->active)) + if (i915_request_signaled(rq) || !intel_rps_is_active(rps)) return; /* Serializes with i915_request_retire() */ @@ -831,7 +829,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val) GEM_BUG_ON(val > rps->max_freq); GEM_BUG_ON(val < rps->min_freq); - if (rps->active) { + if (intel_rps_is_active(rps)) { err = rps_set(rps, val, true); if (err) return err; @@ -1219,6 +1217,7 @@ void intel_rps_enable(struct intel_rps *rps) { struct 
drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); + bool enabled = false; if (!HAS_RPS(i915)) return; @@ -1226,20 +1225,24 @@ void intel_rps_enable(struct intel_rps *rps) intel_gt_check_clock_frequency(rps_to_gt(rps)); intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - if (IS_CHERRYVIEW(i915)) - rps->enabled = chv_rps_enable(rps); + if (rps->max_freq <= rps->min_freq) + /* leave disabled, no room for dynamic reclocking */; + else if (IS_CHERRYVIEW(i915)) + enabled = chv_rps_enable(rps); else if (IS_VALLEYVIEW(i915)) - rps->enabled = vlv_rps_enable(rps); + enabled = vlv_rps_enable(rps); else if (INTEL_GEN(i915) >= 9) - rps->enabled = gen9_rps_enable(rps); + enabled = gen9_rps_enable(rps); else if (INTEL_GEN(i915) >= 8) - rps->enabled = gen8_rps_enable(rps); + enabled = gen8_rps_enable(rps); else if (INTEL_GEN(i915) >= 6) - rps->enabled = gen6_rps_enable(rps); + enabled = gen6_rps_enable(rps); else if (IS_IRONLAKE_M(i915)) - rps->enabled = gen5_rps_enable(rps); + enabled = gen5_rps_enable(rps); + else + MISSING_CASE(INTEL_GEN(i915)); intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - if (!rps->enabled) + if (!enabled) return; GT_TRACE(rps_to_gt(rps), @@ -1253,6 +1256,8 @@ void intel_rps_enable(struct intel_rps *rps) GEM_BUG_ON(rps->efficient_freq < rps->min_freq); GEM_BUG_ON(rps->efficient_freq > rps->max_freq); + + intel_rps_set_enabled(rps); } static void gen6_rps_disable(struct intel_rps *rps) @@ -1264,7 +1269,7 @@ void intel_rps_disable(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - rps->enabled = false; + intel_rps_clear_enabled(rps); if (INTEL_GEN(i915) >= 6) gen6_rps_disable(rps); @@ -1511,6 +1516,10 @@ static void rps_work(struct work_struct *work) goto out; mutex_lock(&rps->lock); + if (!intel_rps_is_active(rps)) { + mutex_unlock(&rps->lock); + return; + } pm_iir |= vlv_wa_c0_ei(rps, pm_iir); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index dfa98194f3b2..a01decf70f31 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -36,4 +36,34 @@ void gen5_rps_irq_handler(struct intel_rps *rps); void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); +static inline bool intel_rps_is_enabled(const struct intel_rps *rps) +{ + return test_bit(INTEL_RPS_ENABLED, &rps->flags); +} + +static inline void intel_rps_set_enabled(struct intel_rps *rps) +{ + set_bit(INTEL_RPS_ENABLED, &rps->flags); +} + +static inline void intel_rps_clear_enabled(struct intel_rps *rps) +{ + clear_bit(INTEL_RPS_ENABLED, &rps->flags); +} + +static inline bool intel_rps_is_active(const struct intel_rps *rps) +{ + return test_bit(INTEL_RPS_ACTIVE, &rps->flags); +} + +static inline void intel_rps_set_active(struct intel_rps *rps) +{ + set_bit(INTEL_RPS_ACTIVE, &rps->flags); +} + +static inline bool intel_rps_clear_active(struct intel_rps *rps) +{ + return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags); +} + #endif /* INTEL_RPS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h index c2e279154bd5..1ec44f994bc5 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps_types.h +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -31,6 +31,11 @@ struct intel_rps_ei { u32 media_c0; }; +enum { + INTEL_RPS_ENABLED = 0, + INTEL_RPS_ACTIVE, +}; + struct intel_rps { struct mutex lock; /* protects enabling and the worker */ @@ -39,8 +44,7 @@ struct intel_rps { * 
dev_priv->irq_lock */ struct work_struct work; - bool enabled; - bool active; + unsigned long flags; u32 pm_iir; /* PM interrupt bits that should never be masked */ diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 21338c1980de..e4cc1c84d206 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -183,7 +183,7 @@ static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms) static u8 rps_set_check(struct intel_rps *rps, u8 freq) { mutex_lock(&rps->lock); - GEM_BUG_ON(!rps->active); + GEM_BUG_ON(!intel_rps_is_active(rps)); intel_rps_set(rps, freq); GEM_BUG_ON(rps->last_freq != freq); mutex_unlock(&rps->lock); @@ -218,7 +218,7 @@ int live_rps_clock_interval(void *arg) struct igt_spinner spin; int err = 0; - if (!rps->enabled) + if (!intel_rps_is_enabled(rps)) return 0; if (igt_spinner_init(&spin, gt)) @@ -364,7 +364,7 @@ int live_rps_control(void *arg) * will be lowered than requested. */ - if (!rps->enabled || rps->max_freq <= rps->min_freq) + if (!intel_rps_is_enabled(rps)) return 0; if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */ @@ -595,7 +595,7 @@ int live_rps_frequency_cs(void *arg) * frequency, the actual frequency, and the observed clock rate. */ - if (!rps->enabled || rps->max_freq <= rps->min_freq) + if (!intel_rps_is_enabled(rps)) return 0; if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ @@ -737,7 +737,7 @@ int live_rps_frequency_srm(void *arg) * frequency, the actual frequency, and the observed clock rate. */ - if (!rps->enabled || rps->max_freq <= rps->min_freq) + if (!intel_rps_is_enabled(rps)) return 0; if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ @@ -904,7 +904,7 @@ static int __rps_up_interrupt(struct intel_rps *rps, return -EIO; } - if (!rps->active) { + if (!intel_rps_is_active(rps)) { pr_err("%s: RPS not enabled on starting spinner\n", engine->name); igt_spinner_end(spin); @@ -1017,7 +1017,7 @@ int live_rps_interrupt(void *arg) * First, let's check whether or not we are receiving interrupts. */ - if (!rps->enabled || rps->max_freq <= rps->min_freq) + if (!intel_rps_is_enabled(rps)) return 0; intel_gt_pm_get(gt); @@ -1041,7 +1041,7 @@ int live_rps_interrupt(void *arg) unsigned long saved_heartbeat; intel_gt_pm_wait_for_idle(engine->gt); - GEM_BUG_ON(rps->active); + GEM_BUG_ON(intel_rps_is_active(rps)); saved_heartbeat = engine_heartbeat_disable(engine); @@ -1126,7 +1126,7 @@ int live_rps_power(void *arg) * that theory. */ - if (!rps->enabled || rps->max_freq <= rps->min_freq) + if (!intel_rps_is_enabled(rps)) return 0; if (!librapl_energy_uJ()) @@ -1231,7 +1231,7 @@ int live_rps_dynamic(void *arg) * moving parts into dynamic reclocking based on load. */ - if (!rps->enabled || rps->max_freq <= rps->min_freq) + if (!intel_rps_is_enabled(rps)) return 0; if (igt_spinner_init(&spin, gt)) @@ -1248,7 +1248,7 @@ int live_rps_dynamic(void *arg) continue; intel_gt_pm_wait_for_idle(gt); - GEM_BUG_ON(rps->active); + GEM_BUG_ON(intel_rps_is_active(rps)); rps->cur_freq = rps->min_freq; intel_engine_pm_get(engine); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 037e9e0275b6..23857a7512b7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1199,7 +1199,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_rps *rps = &dev_priv->gt.rps; - seq_printf(m, "RPS enabled? 
%d\n", rps->enabled); + seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps))); + seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps))); seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake)); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); @@ -1219,7 +1220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); - if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) { + if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) { u32 rpup, rpupei; u32 rpdown, rpdownei; From 8e99299a04bc79bfd4fad71e05c0bd80e48afdb5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 21:54:43 +0100 Subject: [PATCH 116/121] drm/i915/gt: Track use of RPS interrupts in flags Use the new intel_rps.flags field to store whether or not interrupts are being used with RPS. Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200429205446.3259-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_rps.c | 17 ++++++++++++----- drivers/gpu/drm/i915/gt/intel_rps.h | 15 +++++++++++++++ drivers/gpu/drm/i915/gt/intel_rps_types.h | 1 + drivers/gpu/drm/i915/gt/selftest_rps.c | 2 +- 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index a27e989a08eb..52151001d7ab 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -742,7 +742,7 @@ void intel_rps_unpark(struct intel_rps *rps) mutex_unlock(&rps->lock); - if (INTEL_GEN(rps_to_i915(rps)) >= 6) + if (intel_rps_has_interrupts(rps)) rps_enable_interrupts(rps); if (IS_GEN(rps_to_i915(rps), 5)) @@ -751,12 +751,10 @@ void intel_rps_unpark(struct intel_rps *rps) void intel_rps_park(struct intel_rps *rps) { - struct drm_i915_private *i915 = rps_to_i915(rps); - if (!intel_rps_clear_active(rps)) return; - if (INTEL_GEN(i915) >= 6) + if (intel_rps_has_interrupts(rps)) rps_disable_interrupts(rps); if (rps->last_freq <= rps->idle_freq) @@ -838,7 +836,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val) * Make sure we continue to get interrupts * until we hit the minimum or maximum frequencies. 
*/ - if (INTEL_GEN(rps_to_i915(rps)) >= 6) { + if (intel_rps_has_interrupts(rps)) { struct intel_uncore *uncore = rps_to_uncore(rps); set(uncore, @@ -1257,6 +1255,11 @@ void intel_rps_enable(struct intel_rps *rps) GEM_BUG_ON(rps->efficient_freq < rps->min_freq); GEM_BUG_ON(rps->efficient_freq > rps->max_freq); + if (INTEL_GEN(i915) >= 6) + intel_rps_set_interrupts(rps); + else + /* Ironlake currently uses intel_ips.ko */ {} + intel_rps_set_enabled(rps); } @@ -1270,6 +1273,7 @@ void intel_rps_disable(struct intel_rps *rps) struct drm_i915_private *i915 = rps_to_i915(rps); intel_rps_clear_enabled(rps); + intel_rps_clear_interrupts(rps); if (INTEL_GEN(i915) >= 6) gen6_rps_disable(rps); @@ -1741,6 +1745,9 @@ void intel_rps_init(struct intel_rps *rps) if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11) rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; + + if (INTEL_GEN(i915) >= 6) + rps_disable_interrupts(rps); } u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index a01decf70f31..0ce6a0e492ea 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -66,4 +66,19 @@ static inline bool intel_rps_clear_active(struct intel_rps *rps) return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags); } +static inline bool intel_rps_has_interrupts(const struct intel_rps *rps) +{ + return test_bit(INTEL_RPS_INTERRUPTS, &rps->flags); +} + +static inline void intel_rps_set_interrupts(struct intel_rps *rps) +{ + set_bit(INTEL_RPS_INTERRUPTS, &rps->flags); +} + +static inline void intel_rps_clear_interrupts(struct intel_rps *rps) +{ + clear_bit(INTEL_RPS_INTERRUPTS, &rps->flags); +} + #endif /* INTEL_RPS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h index 1ec44f994bc5..624e93108da4 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps_types.h +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -34,6 +34,7 @@ struct intel_rps_ei { enum { INTEL_RPS_ENABLED = 0, INTEL_RPS_ACTIVE, + INTEL_RPS_INTERRUPTS, }; struct intel_rps { diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index e4cc1c84d206..b89a7d7611f6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -1017,7 +1017,7 @@ int live_rps_interrupt(void *arg) * First, let's check whether or not we are receiving interrupts. */ - if (!intel_rps_is_enabled(rps)) + if (!intel_rps_has_interrupts(rps)) return 0; intel_gt_pm_get(gt); From 36d516be867ce3657401ab26b05a39a20f56fa13 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 21:54:44 +0100 Subject: [PATCH 117/121] drm/i915/gt: Switch to manual evaluation of RPS As with the realisation for soft-rc6, we respond to idling the engines within microseconds, far faster than the response times for HW RC6 and RPS. Furthermore, our fast parking upon idle prevents HW RPS from running for many desktop workloads: the RPS evaluation intervals are on the order of tens of milliseconds, whereas the typical workload lasts just a couple of milliseconds, yet we still need to determine the best frequency for user latency versus power. Recognising that the HW evaluation intervals are a poor fit, and that they were deprecated [in bspec at least] from gen10, start to wean ourselves off them and replace the EI with a timer and our accurate busy-stats. 
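In outline, each evaluation now boils down to comparing the busy time accrued since the last sample against the elapsed wall-clock time and, when a threshold is crossed, kicking the existing worker exactly as the hardware interrupt used to. A condensed sketch of the thresholding step inside the rps_timer() added below (the hypothetical rps_eval() elides the per-engine weighting, tracing and interval backoff):

static void rps_eval(struct intel_rps *rps, s64 busy, s64 dt)
{
	/* busy and dt are nanosecond deltas since the previous sample */
	if (100 * busy > rps->power.up_threshold * dt &&
	    rps->cur_freq < rps->max_freq_softlimit) {
		rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;   /* ask rps_work() to upclock */
		schedule_work(&rps->work);
	} else if (100 * busy < rps->power.down_threshold * dt &&
		   rps->cur_freq > rps->min_freq_softlimit) {
		rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; /* ask rps_work() to downclock */
		schedule_work(&rps->work);
	}
}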
The principal benefit of manually evaluating RPS intervals is that we can be more responsive for better performance and powersaving for both spiky workloads and steady-state. Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1698 Fixes: 98479ada421a ("drm/i915/gt: Treat idling as a RPS downclock event") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Andi Shyti Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200429205446.3259-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 5 + drivers/gpu/drm/i915/gt/intel_rps.c | 138 ++++++++++++++++--- drivers/gpu/drm/i915/gt/intel_rps.h | 15 ++ drivers/gpu/drm/i915/gt/intel_rps_types.h | 5 + 4 files changed, 147 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 83b1f95ebf12..f760e2ef285b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -555,6 +555,11 @@ struct intel_engine_cs { * Idle is defined as active == 0, active is active > 0. */ ktime_t start; + + /** + * @rps: Utilisation at last RPS sampling. + */ + ktime_t rps; } stats; struct { diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 52151001d7ab..8b2991de1c97 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -15,6 +15,8 @@ #include "intel_sideband.h" #include "../../../platform/x86/intel_ips.h" +#define BUSY_MAX_EI 20u /* ms */ + /* * Lock protecting IPS related data structures */ @@ -45,6 +47,100 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) intel_uncore_write_fw(uncore, reg, val); } +static void rps_timer(struct timer_list *t) +{ + struct intel_rps *rps = from_timer(rps, t, timer); + struct intel_engine_cs *engine; + enum intel_engine_id id; + s64 max_busy[3] = {}; + ktime_t dt, last; + + for_each_engine(engine, rps_to_gt(rps), id) { + s64 busy; + int i; + + dt = intel_engine_get_busy_time(engine); + last = engine->stats.rps; + engine->stats.rps = dt; + + busy = ktime_to_ns(ktime_sub(dt, last)); + for (i = 0; i < ARRAY_SIZE(max_busy); i++) { + if (busy > max_busy[i]) + swap(busy, max_busy[i]); + } + } + + dt = ktime_get(); + last = rps->pm_timestamp; + rps->pm_timestamp = dt; + + if (intel_rps_is_active(rps)) { + s64 busy; + int i; + + dt = ktime_sub(dt, last); + + /* + * Our goal is to evaluate each engine independently, so we run + * at the lowest clocks required to sustain the heaviest + * workload. However, a task may be split into sequential + * dependent operations across a set of engines, such that + * the independent contributions do not account for high load, + * but overall the task is GPU bound. For example, consider + * video decode on vcs followed by colour post-processing + * on vecs, followed by general post-processing on rcs. + * Since multi-engines being active does imply a single + * continuous workload across all engines, we hedge our + * bets by only contributing a factor of the distributed + * load into our busyness calculation. 
+ */ + busy = max_busy[0]; + for (i = 1; i < ARRAY_SIZE(max_busy); i++) { + if (!max_busy[i]) + break; + + busy += div_u64(max_busy[i], 1 << i); + } + GT_TRACE(rps_to_gt(rps), + "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n", + busy, (int)div64_u64(100 * busy, dt), + max_busy[0], max_busy[1], max_busy[2], + rps->pm_interval); + + if (100 * busy > rps->power.up_threshold * dt && + rps->cur_freq < rps->max_freq_softlimit) { + rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD; + rps->pm_interval = 1; + schedule_work(&rps->work); + } else if (100 * busy < rps->power.down_threshold * dt && + rps->cur_freq > rps->min_freq_softlimit) { + rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; + rps->pm_interval = 1; + schedule_work(&rps->work); + } else { + rps->last_adj = 0; + } + + mod_timer(&rps->timer, + jiffies + msecs_to_jiffies(rps->pm_interval)); + rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI); + } +} + +static void rps_start_timer(struct intel_rps *rps) +{ + rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); + rps->pm_interval = 1; + mod_timer(&rps->timer, jiffies + 1); +} + +static void rps_stop_timer(struct intel_rps *rps) +{ + del_timer_sync(&rps->timer); + rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); + cancel_work_sync(&rps->work); +} + static u32 rps_pm_mask(struct intel_rps *rps, u8 val) { u32 mask = 0; @@ -535,36 +631,24 @@ static void rps_set_power(struct intel_rps *rps, int new_power) if (new_power == rps->power.mode) return; + threshold_up = 95; + threshold_down = 85; + /* Note the units here are not exactly 1us, but 1280ns. */ switch (new_power) { case LOW_POWER: - /* Upclock if more than 95% busy over 16ms */ ei_up = 16000; - threshold_up = 95; - - /* Downclock if less than 85% busy over 32ms */ ei_down = 32000; - threshold_down = 85; break; case BETWEEN: - /* Upclock if more than 90% busy over 13ms */ ei_up = 13000; - threshold_up = 90; - - /* Downclock if less than 75% busy over 32ms */ ei_down = 32000; - threshold_down = 75; break; case HIGH_POWER: - /* Upclock if more than 85% busy over 10ms */ ei_up = 10000; - threshold_up = 85; - - /* Downclock if less than 60% busy over 32ms */ ei_down = 32000; - threshold_down = 60; break; } @@ -742,8 +826,11 @@ void intel_rps_unpark(struct intel_rps *rps) mutex_unlock(&rps->lock); + rps->pm_iir = 0; if (intel_rps_has_interrupts(rps)) rps_enable_interrupts(rps); + if (intel_rps_uses_timer(rps)) + rps_start_timer(rps); if (IS_GEN(rps_to_i915(rps), 5)) gen5_rps_update(rps); @@ -754,6 +841,8 @@ void intel_rps_park(struct intel_rps *rps) if (!intel_rps_clear_active(rps)) return; + if (intel_rps_uses_timer(rps)) + rps_stop_timer(rps); if (intel_rps_has_interrupts(rps)) rps_disable_interrupts(rps); @@ -1211,6 +1300,19 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips) return ips->gfx_power + state2; } +static bool has_busy_stats(struct intel_rps *rps) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, rps_to_gt(rps), id) { + if (!intel_engine_supports_stats(engine)) + return false; + } + + return true; +} + void intel_rps_enable(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); @@ -1255,7 +1357,9 @@ void intel_rps_enable(struct intel_rps *rps) GEM_BUG_ON(rps->efficient_freq < rps->min_freq); GEM_BUG_ON(rps->efficient_freq > rps->max_freq); - if (INTEL_GEN(i915) >= 6) + if (has_busy_stats(rps)) + intel_rps_set_timer(rps); + else if (INTEL_GEN(i915) >= 6) intel_rps_set_interrupts(rps); else /* Ironlake currently uses intel_ips.ko */ {} @@ 
-1274,6 +1378,7 @@ void intel_rps_disable(struct intel_rps *rps) intel_rps_clear_enabled(rps); intel_rps_clear_interrupts(rps); + intel_rps_clear_timer(rps); if (INTEL_GEN(i915) >= 6) gen6_rps_disable(rps); @@ -1689,6 +1794,7 @@ void intel_rps_init_early(struct intel_rps *rps) mutex_init(&rps->power.mutex); INIT_WORK(&rps->work, rps_work); + timer_setup(&rps->timer, rps_timer, 0); atomic_set(&rps->num_waiters, 0); } diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index 0ce6a0e492ea..af07fa5b7584 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -81,4 +81,19 @@ static inline void intel_rps_clear_interrupts(struct intel_rps *rps) clear_bit(INTEL_RPS_INTERRUPTS, &rps->flags); } +static inline bool intel_rps_uses_timer(const struct intel_rps *rps) +{ + return test_bit(INTEL_RPS_TIMER, &rps->flags); +} + +static inline void intel_rps_set_timer(struct intel_rps *rps) +{ + set_bit(INTEL_RPS_TIMER, &rps->flags); +} + +static inline void intel_rps_clear_timer(struct intel_rps *rps) +{ + clear_bit(INTEL_RPS_TIMER, &rps->flags); +} + #endif /* INTEL_RPS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h index 624e93108da4..38083f0402d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps_types.h +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -35,6 +35,7 @@ enum { INTEL_RPS_ENABLED = 0, INTEL_RPS_ACTIVE, INTEL_RPS_INTERRUPTS, + INTEL_RPS_TIMER, }; struct intel_rps { @@ -44,8 +45,12 @@ struct intel_rps { * work, interrupts_enabled and pm_iir are protected by * dev_priv->irq_lock */ + struct timer_list timer; struct work_struct work; unsigned long flags; + + ktime_t pm_timestamp; + u32 pm_interval; u32 pm_iir; /* PM interrupt bits that should never be masked */ From 3f88dde6ee630983b2d4be9967049306a90a2d40 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 21:54:45 +0100 Subject: [PATCH 118/121] drm/i915/gt: Apply the aggressive downclocking to parking We treat parking as a manual RPS timeout event, and downclock the GPU for the next unpark and batch execution. However, having restored the aggressive downclocking and observed that we have very light workloads whose only interaction is through the manual parking events, carry over the aggressive downclocking to the fake RPS events. References: 21abf0bf168d ("drm/i915/gt: Treat idling as a RPS downclock event") Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200429205446.3259-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_rps.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 8b2991de1c97..1716d6d2c76f 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -822,8 +822,6 @@ void intel_rps_unpark(struct intel_rps *rps) rps->min_freq_softlimit, rps->max_freq_softlimit)); - rps->last_adj = 0; - mutex_unlock(&rps->lock); rps->pm_iir = 0; @@ -838,6 +836,8 @@ void intel_rps_unpark(struct intel_rps *rps) void intel_rps_park(struct intel_rps *rps) { + int adj; + if (!intel_rps_clear_active(rps)) return; @@ -876,8 +876,13 @@ void intel_rps_park(struct intel_rps *rps) * (Note we accommodate Cherryview's limitation of only using an * even bin by applying it to all.) 
*/ - rps->cur_freq = - max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq); + adj = rps->last_adj; + if (adj < 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = -2; + rps->last_adj = adj; + rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } From de3b4d9361c8a527aa91f49f779e2e378019d0f6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 21:54:46 +0100 Subject: [PATCH 119/121] drm/i915/gt: Restore aggressive post-boost downclocking We reduced the clocks slowly after a boost event based on the observation that the smoothness of animations suffered. However, since reducing the evaluation intervals, we should be able to respond to the rapidly fluctuating workload of a simple desktop animation and so restore the more aggressive downclocking. References: 2a8862d2f3da ("drm/i915: Reduce the RPS shock") Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200429205446.3259-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_rps.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 1716d6d2c76f..c682355ec79e 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1680,30 +1680,18 @@ static void rps_work(struct work_struct *work) adj = 0; } - rps->last_adj = adj; - /* - * Limit deboosting and boosting to keep ourselves at the extremes - * when in the respective power modes (i.e. slowly decrease frequencies - * while in the HIGH_POWER zone and slowly increase frequencies while - * in the LOW_POWER zone). On idle, we will hit the timeout and drop - * to the next level quickly, and conversely if busy we expect to - * hit a waitboost and rapidly switch into max power. - */ - if ((adj < 0 && rps->power.mode == HIGH_POWER) || - (adj > 0 && rps->power.mode == LOW_POWER)) - rps->last_adj = 0; - - /* sysfs frequency interfaces may have snuck in while servicing the - * interrupt + * sysfs frequency limits may have snuck in while + * servicing the interrupt */ new_freq += adj; new_freq = clamp_t(int, new_freq, min, max); if (intel_rps_set(rps, new_freq)) { drm_dbg(&i915->drm, "Failed to set new GPU frequency\n"); - rps->last_adj = 0; + adj = 0; } + rps->last_adj = adj; mutex_unlock(&rps->lock); From 79eb8c7f015a6c2b307b86ca4176c5f0ed987219 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Zbigniew=20Kempczy=C5=84ski?= Date: Thu, 30 Apr 2020 07:49:57 +0100 Subject: [PATCH 120/121] drm/i915/selftests: Add tiled blits selftest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend coverage of the blitter client by exercising conversion to and from tiled sources. In the process we perform spot checks to verify that the tiling/detiling is being applied correctly, along with position invariance of the tiling parameters. 
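In outline, each spot check picks a random texel, computes where the blitter should have placed it for the buffer's tiling mode, and compares that location against the known fill pattern. A condensed sketch (the hypothetical spot_check() is distilled from verify_buffer()/tiled_offset() in the diff below; the bit-6 swizzle handling is elided):

static int spot_check(const struct tiled_blits *t,
		      const struct blit_buffer *buf,
		      const u32 *vaddr, int x, int y)
{
	int p = y * t->width + x;		/* linear texel index */
	u64 v = tiled_offset(buf->vma->vm->gt,	/* byte offset after (de)tiling */
			     p * 4, t->width * 4, buf->tiling);

	/* each buffer was filled with start_val, start_val + 1, ... */
	return vaddr[v / sizeof(*vaddr)] == buf->start_val + p ? 0 : -EINVAL;
}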
Signed-off-by: Zbigniew Kempczyński Cc: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200430064957.14942-1-chris@chris-wilson.co.uk --- .../i915/gem/selftests/i915_gem_client_blt.c | 595 ++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 2 + 2 files changed, 597 insertions(+) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index b972be165e85..be268511cb6d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -7,9 +7,12 @@ #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" +#include "gt/intel_gpu_commands.h" +#include "gem/i915_gem_lmem.h" #include "selftests/igt_flush_test.h" #include "selftests/mock_drm.h" +#include "selftests/i915_random.h" #include "huge_gem_object.h" #include "mock_context.h" @@ -127,10 +130,602 @@ static int igt_client_fill(void *arg) } while (1); } +#define WIDTH 512 +#define HEIGHT 32 + +struct blit_buffer { + struct i915_vma *vma; + u32 start_val; + u32 tiling; +}; + +struct tiled_blits { + struct intel_context *ce; + struct blit_buffer buffers[3]; + struct blit_buffer scratch; + struct i915_vma *batch; + u64 hole; + u32 width; + u32 height; +}; + +static int prepare_blit(const struct tiled_blits *t, + struct blit_buffer *dst, + struct blit_buffer *src, + struct drm_i915_gem_object *batch) +{ + const int gen = INTEL_GEN(to_i915(batch->base.dev)); + bool use_64b_reloc = gen >= 8; + u32 src_pitch, dst_pitch; + u32 cmd, *cs; + + cs = i915_gem_object_pin_map(batch, I915_MAP_WC); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(BCS_SWCTRL); + cmd = (BCS_SRC_Y | BCS_DST_Y) << 16; + if (src->tiling == I915_TILING_Y) + cmd |= BCS_SRC_Y; + if (dst->tiling == I915_TILING_Y) + cmd |= BCS_DST_Y; + *cs++ = cmd; + + cmd = MI_FLUSH_DW; + if (gen >= 8) + cmd++; + *cs++ = cmd; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + + cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2); + if (gen >= 8) + cmd += 2; + + src_pitch = t->width * 4; + if (src->tiling) { + cmd |= XY_SRC_COPY_BLT_SRC_TILED; + src_pitch /= 4; + } + + dst_pitch = t->width * 4; + if (dst->tiling) { + cmd |= XY_SRC_COPY_BLT_DST_TILED; + dst_pitch /= 4; + } + + *cs++ = cmd; + *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch; + *cs++ = 0; + *cs++ = t->height << 16 | t->width; + *cs++ = lower_32_bits(dst->vma->node.start); + if (use_64b_reloc) + *cs++ = upper_32_bits(dst->vma->node.start); + *cs++ = 0; + *cs++ = src_pitch; + *cs++ = lower_32_bits(src->vma->node.start); + if (use_64b_reloc) + *cs++ = upper_32_bits(src->vma->node.start); + + *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(batch); + i915_gem_object_unpin_map(batch); + + return 0; +} + +static void tiled_blits_destroy_buffers(struct tiled_blits *t) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(t->buffers); i++) + i915_vma_put(t->buffers[i].vma); + + i915_vma_put(t->scratch.vma); + i915_vma_put(t->batch); +} + +static struct i915_vma * +__create_vma(struct tiled_blits *t, size_t size, bool lmem) +{ + struct drm_i915_private *i915 = t->ce->vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + if (lmem) + obj = i915_gem_object_create_lmem(i915, size, 0); + else + obj = i915_gem_object_create_shmem(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, t->ce->vm, NULL); + if 
(IS_ERR(vma)) + i915_gem_object_put(obj); + + return vma; +} + +static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem) +{ + return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem); +} + +static int tiled_blits_create_buffers(struct tiled_blits *t, + int width, int height, + struct rnd_state *prng) +{ + struct drm_i915_private *i915 = t->ce->engine->i915; + int i; + + t->width = width; + t->height = height; + + t->batch = __create_vma(t, PAGE_SIZE, false); + if (IS_ERR(t->batch)) + return PTR_ERR(t->batch); + + t->scratch.vma = create_vma(t, false); + if (IS_ERR(t->scratch.vma)) { + i915_vma_put(t->batch); + return PTR_ERR(t->scratch.vma); + } + + for (i = 0; i < ARRAY_SIZE(t->buffers); i++) { + struct i915_vma *vma; + + vma = create_vma(t, HAS_LMEM(i915) && i % 2); + if (IS_ERR(vma)) { + tiled_blits_destroy_buffers(t); + return PTR_ERR(vma); + } + + t->buffers[i].vma = vma; + t->buffers[i].tiling = + i915_prandom_u32_max_state(I915_TILING_Y + 1, prng); + } + + return 0; +} + +static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val) +{ + int i; + + t->scratch.start_val = val; + for (i = 0; i < t->width * t->height; i++) + vaddr[i] = val++; + + i915_gem_object_flush_map(t->scratch.vma->obj); +} + +static void hexdump(const void *buf, size_t len) +{ + const size_t rowsize = 8 * sizeof(u32); + const void *prev = NULL; + bool skip = false; + size_t pos; + + for (pos = 0; pos < len; pos += rowsize) { + char line[128]; + + if (prev && !memcmp(prev, buf + pos, rowsize)) { + if (!skip) { + pr_info("*\n"); + skip = true; + } + continue; + } + + WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, + rowsize, sizeof(u32), + line, sizeof(line), + false) >= sizeof(line)); + pr_info("[%04zx] %s\n", pos, line); + + prev = buf + pos; + skip = false; + } +} + +static u64 swizzle_bit(unsigned int bit, u64 offset) +{ + return (offset & BIT_ULL(bit)) >> (bit - 6); +} + +static u64 tiled_offset(const struct intel_gt *gt, + u64 v, + unsigned int stride, + unsigned int tiling) +{ + unsigned int swizzle; + u64 x, y; + + if (tiling == I915_TILING_NONE) + return v; + + y = div64_u64_rem(v, stride, &x); + + if (tiling == I915_TILING_X) { + v = div64_u64_rem(y, 8, &y) * stride * 8; + v += y * 512; + v += div64_u64_rem(x, 512, &x) << 12; + v += x; + + swizzle = gt->ggtt->bit_6_swizzle_x; + } else { + const unsigned int ytile_span = 16; + const unsigned int ytile_height = 512; + + v = div64_u64_rem(y, 32, &y) * stride * 32; + v += y * ytile_span; + v += div64_u64_rem(x, ytile_span, &x) * ytile_height; + v += x; + + swizzle = gt->ggtt->bit_6_swizzle_y; + } + + switch (swizzle) { + case I915_BIT_6_SWIZZLE_9: + v ^= swizzle_bit(9, v); + break; + case I915_BIT_6_SWIZZLE_9_10: + v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v); + break; + case I915_BIT_6_SWIZZLE_9_11: + v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v); + break; + case I915_BIT_6_SWIZZLE_9_10_11: + v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v); + break; + } + + return v; +} + +static const char *repr_tiling(int tiling) +{ + switch (tiling) { + case I915_TILING_NONE: return "linear"; + case I915_TILING_X: return "X"; + case I915_TILING_Y: return "Y"; + default: return "unknown"; + } +} + +static int verify_buffer(const struct tiled_blits *t, + struct blit_buffer *buf, + struct rnd_state *prng) +{ + const u32 *vaddr; + int ret = 0; + int x, y, p; + + x = i915_prandom_u32_max_state(t->width, prng); + y = i915_prandom_u32_max_state(t->height, prng); + p = y * t->width + x; + + vaddr = 
i915_gem_object_pin_map(buf->vma->obj, I915_MAP_WC); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + if (vaddr[0] != buf->start_val) { + ret = -EINVAL; + } else { + u64 v = tiled_offset(buf->vma->vm->gt, + p * 4, t->width * 4, + buf->tiling); + + if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p) + ret = -EINVAL; + } + if (ret) { + pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n", + repr_tiling(buf->tiling), + x, y, buf->start_val); + hexdump(vaddr, 4096); + } + + i915_gem_object_unpin_map(buf->vma->obj); + return ret; +} + +static int move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + int err; + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, flags); + i915_vma_unlock(vma); + + return err; +} + +static int pin_buffer(struct i915_vma *vma, u64 addr) +{ + int err; + + if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) { + err = i915_vma_unbind(vma); + if (err) + return err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr); + if (err) + return err; + + return 0; +} + +static int +tiled_blit(struct tiled_blits *t, + struct blit_buffer *dst, u64 dst_addr, + struct blit_buffer *src, u64 src_addr) +{ + struct i915_request *rq; + int err; + + err = pin_buffer(src->vma, src_addr); + if (err) { + pr_err("Cannot pin src @ %llx\n", src_addr); + return err; + } + + err = pin_buffer(dst->vma, dst_addr); + if (err) { + pr_err("Cannot pin dst @ %llx\n", dst_addr); + goto err_src; + } + + err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH); + if (err) { + pr_err("cannot pin batch\n"); + goto err_dst; + } + + err = prepare_blit(t, dst, src, t->batch->obj); + if (err) + goto err_bb; + + rq = intel_context_create_request(t->ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_bb; + } + + err = move_to_active(t->batch, rq, 0); + if (!err) + err = move_to_active(src->vma, rq, 0); + if (!err) + err = move_to_active(dst->vma, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + t->batch->node.start, + t->batch->node.size, + 0); + i915_request_get(rq); + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 2) < 0) + err = -ETIME; + i915_request_put(rq); + + dst->start_val = src->start_val; +err_bb: + i915_vma_unpin(t->batch); +err_dst: + i915_vma_unpin(dst->vma); +err_src: + i915_vma_unpin(src->vma); + return err; +} + +static struct tiled_blits * +tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng) +{ + struct drm_mm_node hole; + struct tiled_blits *t; + u64 hole_size; + int err; + + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) + return ERR_PTR(-ENOMEM); + + t->ce = intel_context_create(engine); + if (IS_ERR(t->ce)) { + err = PTR_ERR(t->ce); + goto err_free; + } + + hole_size = 2 * PAGE_ALIGN(WIDTH * HEIGHT * 4); + hole_size *= 2; /* room to maneuver */ + hole_size += 2 * I915_GTT_MIN_ALIGNMENT; + + mutex_lock(&t->ce->vm->mutex); + memset(&hole, 0, sizeof(hole)); + err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole, + hole_size, 0, I915_COLOR_UNEVICTABLE, + 0, U64_MAX, + DRM_MM_INSERT_BEST); + if (!err) + drm_mm_remove_node(&hole); + mutex_unlock(&t->ce->vm->mutex); + if (err) { + err = -ENODEV; + goto err_put; + } + + t->hole = hole.start + I915_GTT_MIN_ALIGNMENT; + pr_info("Using hole at %llx\n", t->hole); + + err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng); + if (err) + goto err_put; + + return t; + +err_put: + intel_context_put(t->ce); +err_free: + kfree(t); + return 
ERR_PTR(err); +} + +static void tiled_blits_destroy(struct tiled_blits *t) +{ + tiled_blits_destroy_buffers(t); + + intel_context_put(t->ce); + kfree(t); +} + +static int tiled_blits_prepare(struct tiled_blits *t, + struct rnd_state *prng) +{ + u64 offset = PAGE_ALIGN(t->width * t->height * 4); + u32 *map; + int err; + int i; + + map = i915_gem_object_pin_map(t->scratch.vma->obj, I915_MAP_WC); + if (IS_ERR(map)) + return PTR_ERR(map); + + /* Use scratch to fill objects */ + for (i = 0; i < ARRAY_SIZE(t->buffers); i++) { + fill_scratch(t, map, prandom_u32_state(prng)); + GEM_BUG_ON(verify_buffer(t, &t->scratch, prng)); + + err = tiled_blit(t, + &t->buffers[i], t->hole + offset, + &t->scratch, t->hole); + if (err == 0) + err = verify_buffer(t, &t->buffers[i], prng); + if (err) { + pr_err("Failed to create buffer %d\n", i); + break; + } + } + + i915_gem_object_unpin_map(t->scratch.vma->obj); + return err; +} + +static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng) +{ + u64 offset = + round_up(t->width * t->height * 4, 2 * I915_GTT_MIN_ALIGNMENT); + int err; + + /* We want to check position invariant tiling across GTT eviction */ + + err = tiled_blit(t, + &t->buffers[1], t->hole + offset / 2, + &t->buffers[0], t->hole + 2 * offset); + if (err) + return err; + + /* Reposition so that we overlap the old addresses, and slightly off */ + err = tiled_blit(t, + &t->buffers[2], t->hole + I915_GTT_MIN_ALIGNMENT, + &t->buffers[1], t->hole + 3 * offset / 2); + if (err) + return err; + + err = verify_buffer(t, &t->buffers[2], prng); + if (err) + return err; + + return 0; +} + +static int __igt_client_tiled_blits(struct intel_engine_cs *engine, + struct rnd_state *prng) +{ + struct tiled_blits *t; + int err; + + t = tiled_blits_create(engine, prng); + if (IS_ERR(t)) + return PTR_ERR(t); + + err = tiled_blits_prepare(t, prng); + if (err) + goto out; + + err = tiled_blits_bounce(t, prng); + if (err) + goto out; + +out: + tiled_blits_destroy(t); + return err; +} + +static bool has_bit17_swizzle(int sw) +{ + return (sw == I915_BIT_6_SWIZZLE_9_10_17 || + sw == I915_BIT_6_SWIZZLE_9_17); +} + +static bool bad_swizzling(struct drm_i915_private *i915) +{ + struct i915_ggtt *ggtt = &i915->ggtt; + + if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + return true; + + if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) || + has_bit17_swizzle(ggtt->bit_6_swizzle_y)) + return true; + + return false; +} + +static int igt_client_tiled_blits(void *arg) +{ + struct drm_i915_private *i915 = arg; + I915_RND_STATE(prng); + int inst = 0; + + /* Test requires explicit BLT tiling controls */ + if (INTEL_GEN(i915) < 4) + return 0; + + if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */ + return 0; + + do { + struct intel_engine_cs *engine; + int err; + + engine = intel_engine_lookup_user(i915, + I915_ENGINE_CLASS_COPY, + inst++); + if (!engine) + return 0; + + err = __igt_client_tiled_blits(engine, &prng); + if (err == -ENODEV) + err = 0; + if (err) + return err; + } while (1); +} + int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_client_fill), + SUBTEST(igt_client_tiled_blits), }; if (intel_gt_is_wedged(&i915->gt)) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 96d9f8853343..fd9f2904d93c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -561,6 +561,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * Registers used only by the command parser */ 
#define BCS_SWCTRL _MMIO(0x22200) +#define BCS_SRC_Y REG_BIT(0) +#define BCS_DST_Y REG_BIT(1) /* There are 16 GPR registers */ #define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) From 230982d8d8df7f9d9aa216840ea2db1df6ad5d37 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Thu, 30 Apr 2020 11:13:21 +0300 Subject: [PATCH 121/121] drm/i915: Update DRIVER_DATE to 20200430 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8b80b9d23be9..7af48641dbc9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -108,8 +108,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20200417" -#define DRIVER_TIMESTAMP 1587105300 +#define DRIVER_DATE "20200430" +#define DRIVER_TIMESTAMP 1588234401 struct drm_i915_gem_object;